[VOL-1386] This commit adds "dep" as the package management tool
for voltha-go.

Change-Id: I52bc4911dd00a441756ec7c30f46d45091f3f90e
diff --git a/vendor/github.com/DataDog/zstd/.travis.yml b/vendor/github.com/DataDog/zstd/.travis.yml
new file mode 100644
index 0000000..c5aa33d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/.travis.yml
@@ -0,0 +1,30 @@
+language: go
+
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+
+os:
+  - linux
+  - osx
+
+matrix:
+  include:
+    - name: "Go 1.11.x CentOS 32bits"
+      language: go
+      go: 1.11.x
+      os: linux
+      services:
+        - docker
+      script:
+        # Please update Go version in travis_test_32 as needed
+        - "docker run -i -v \"${PWD}:/zstd\" toopher/centos-i386:centos6 /bin/bash -c \"linux32 --32bit i386 /zstd/travis_test_32.sh\""
+
+install:
+  - "wget https://github.com/DataDog/zstd/files/2246767/mr.zip"
+  - "unzip mr.zip"
+script:
+  - "go build"
+  - "PAYLOAD=`pwd`/mr go test -v"
+  - "PAYLOAD=`pwd`/mr go test -bench ."
diff --git a/vendor/github.com/DataDog/zstd/LICENSE b/vendor/github.com/DataDog/zstd/LICENSE
new file mode 100644
index 0000000..345c1eb
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/LICENSE
@@ -0,0 +1,27 @@
+Simplified BSD License
+
+Copyright (c) 2016, Datadog <info@datadoghq.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the names of its contributors
+      may be used to endorse or promote products derived from this software
+      without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/zstd/README.md b/vendor/github.com/DataDog/zstd/README.md
new file mode 100644
index 0000000..6c02e16
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/README.md
@@ -0,0 +1,120 @@
+# Zstd Go Wrapper
+
+[C Zstd Homepage](https://github.com/Cyan4973/zstd)
+
+The current headers and C files are from *v1.3.4* (Commit
+[2555975](https://github.com/facebook/zstd/releases/tag/v1.3.4)).
+
+## Usage
+
+There are two main APIs:
+
+* simple Compress/Decompress
+* streaming API (io.Reader/io.Writer)
+
+The compress/decompress APIs mirror those of lz4, while the streaming API was
+designed to be a drop-in replacement for zlib.
+
+### Simple `Compress/Decompress`
+
+
+```go
+// Compress compresses the byte array given in src and writes it to dst.
+// If you already have a buffer allocated, you can pass it to prevent allocation.
+// If not, you can pass nil as dst.
+// If the buffer is too small, it will be reallocated, resized, and returned by the function.
+// If dst is nil, this will allocate the worst case size (CompressBound(src)).
+Compress(dst, src []byte) ([]byte, error)
+```
+
+```go
+// CompressLevel is the same as Compress but you can pass another compression level
+CompressLevel(dst, src []byte, level int) ([]byte, error)
+```
+
+```go
+// Decompress will decompress your payload into dst.
+// If you already have a buffer allocated, you can pass it to prevent allocation.
+// If not, you can pass nil as dst (allocates a buffer of 4*len(src) by default).
+// If the buffer is too small, it will retry 3 times, doubling the dst size each time.
+// After max retries, it will switch to the slower stream API to guarantee
+// decompression. Currently switches if compression ratio > 4*2**3=32.
+Decompress(dst, src []byte) ([]byte, error)
+```
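+
+A minimal round-trip sketch of the simple API (error handling elided for
+brevity; assumes the package is imported from `github.com/DataDog/zstd`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/DataDog/zstd"
+)
+
+func main() {
+	payload := []byte("hello zstd")
+
+	// Pass nil as dst: Compress allocates the worst case size (CompressBound).
+	compressed, _ := zstd.Compress(nil, payload)
+
+	// Pass nil as dst: Decompress allocates 4*len(src) by default.
+	decompressed, _ := zstd.Decompress(nil, compressed)
+	fmt.Println(string(decompressed))
+}
+```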
+
+### Stream API
+
+```go
+// NewWriter creates a new object that can optionally be initialized with
+// a precomputed dictionary. If dict is nil, compress without a dictionary.
+// The dictionary array should not be changed during the use of this object.
+// You MUST CALL Close() to write the last bytes of a zstd stream and free C objects.
+NewWriter(w io.Writer) *Writer
+NewWriterLevel(w io.Writer, level int) *Writer
+NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer
+
+// Write compresses the input data and writes it to the underlying writer
+(w *Writer) Write(p []byte) (int, error)
+
+// Close flushes the buffer and frees C zstd objects
+(w *Writer) Close() error
+```
+
+```go
+// NewReader returns a new io.ReadCloser that will decompress data from the
+// underlying reader.  If a dictionary is provided to NewReaderDict, it must
+// not be modified until Close is called.  It is the caller's responsibility
+// to call Close, which frees up C objects.
+NewReader(r io.Reader) io.ReadCloser
+NewReaderDict(r io.Reader, dict []byte) io.ReadCloser
+```
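+
+A streaming round-trip sketch (error handling elided; assumes imports of
+`bytes`, `io/ioutil`, and `github.com/DataDog/zstd`):
+
+```go
+var buf bytes.Buffer
+
+// Compress into buf. Close MUST be called to write the last bytes
+// of the zstd stream and free the C objects.
+w := zstd.NewWriter(&buf)
+w.Write([]byte("hello zstd stream"))
+w.Close()
+
+// Decompress from buf. The caller is responsible for calling Close,
+// which frees the C objects.
+r := zstd.NewReader(&buf)
+decompressed, _ := ioutil.ReadAll(r)
+r.Close()
+```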
+
+### Benchmarks (benchmarked with v0.5.0)
+
+The author of Zstd also wrote lz4. Zstd is intended to occupy a speed/ratio
+level similar to what zlib currently provides.  In our tests, it can always
+be made better than zlib by choosing an appropriate level, while still
+keeping compression and decompression faster than zlib.
+
+You can run the benchmarks against your own payloads by using the Go benchmark tool.
+Just export your payload filepath as the `PAYLOAD` environment variable and run the benchmarks:
+
+```
+PAYLOAD=/path/to/your/payload go test -bench .
+```
+
+Compression of a 7MB PDF, zstd (this wrapper) vs [czlib](https://github.com/DataDog/czlib):
+```
+BenchmarkCompression               5     221056624 ns/op      67.34 MB/s
+BenchmarkDecompression           100      18370416 ns/op     810.32 MB/s
+
+BenchmarkFzlibCompress             2     610156603 ns/op      24.40 MB/s
+BenchmarkFzlibDecompress          20      81195246 ns/op     183.33 MB/s
+```
+
+Ratio is also better by a margin of ~20%.
+Compression speed is always better than zlib on all the payloads we tested.
+However, [czlib](https://github.com/DataDog/czlib) has optimisations that make it
+faster at decompressing small payloads:
+
+```
+Testing with size: 11... czlib: 8.97 MB/s, zstd: 3.26 MB/s
+Testing with size: 27... czlib: 23.3 MB/s, zstd: 8.22 MB/s
+Testing with size: 62... czlib: 31.6 MB/s, zstd: 19.49 MB/s
+Testing with size: 141... czlib: 74.54 MB/s, zstd: 42.55 MB/s
+Testing with size: 323... czlib: 155.14 MB/s, zstd: 99.39 MB/s
+Testing with size: 739... czlib: 235.9 MB/s, zstd: 216.45 MB/s
+Testing with size: 1689... czlib: 116.45 MB/s, zstd: 345.64 MB/s
+Testing with size: 3858... czlib: 176.39 MB/s, zstd: 617.56 MB/s
+Testing with size: 8811... czlib: 254.11 MB/s, zstd: 824.34 MB/s
+Testing with size: 20121... czlib: 197.43 MB/s, zstd: 1339.11 MB/s
+Testing with size: 45951... czlib: 201.62 MB/s, zstd: 1951.57 MB/s
+```
+
+zstd starts to shine with payloads > 1KB.
+
+### Stability - Current state: STABLE
+
+The C library seems to be pretty stable and, according to the author, has been tested and fuzzed.
+
+For the Go wrapper, the tests cover the most usual cases, and we have successfully tested it on all our staging and production data.
diff --git a/vendor/github.com/DataDog/zstd/ZSTD_LICENSE b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE
new file mode 100644
index 0000000..a793a80
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/ZSTD_LICENSE
@@ -0,0 +1,30 @@
+BSD License
+
+For Zstandard software
+
+Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+   endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/DataDog/zstd/bitstream.h b/vendor/github.com/DataDog/zstd/bitstream.h
new file mode 100644
index 0000000..f7f389f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/bitstream.h
@@ -0,0 +1,471 @@
+/* ******************************************************************
+   bitstream
+   Part of FSE library
+   header file (to include)
+   Copyright (C) 2013-2017, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+*  This API consists of small unitary functions, which must be inlined for best performance.
+*  Since link-time-optimization is not available for all compilers,
+*  these functions are defined in a .h file, to be included.
+*/
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include "mem.h"            /* unaligned access routines */
+#include "error_private.h"  /* error codes and messages */
+
+
+/*-*************************************
+*  Debug
+***************************************/
+#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
+#  include <assert.h>
+#else
+#  ifndef assert
+#    define assert(condition) ((void)0)
+#  endif
+#endif
+
+
+/*=========================================
+*  Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+#  include <immintrin.h>   /* support for bextr (experimental) */
+#endif
+
+#define STREAM_ACCUMULATOR_MIN_32  25
+#define STREAM_ACCUMULATOR_MIN_64  57
+#define STREAM_ACCUMULATOR_MIN    ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
+
+
+/*-******************************************
+*  bitStream encoding API (write forward)
+********************************************/
+/* bitStream can mix input from multiple sources.
+ * A critical property of these streams is that they encode and decode in **reverse** direction.
+ * So the first bit sequence you add will be the last to be read, like a LIFO stack.
+ */
+typedef struct
+{
+    size_t bitContainer;
+    unsigned bitPos;
+    char*  startPtr;
+    char*  ptr;
+    char*  endPtr;
+} BIT_CStream_t;
+
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
+MEM_STATIC void   BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+MEM_STATIC void   BIT_flushBits(BIT_CStream_t* bitC);
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
+
+/* Start with initCStream, providing the size of buffer to write into.
+*  bitStream will never write outside of this buffer.
+*  `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
+*
+*  bits are first added to a local register.
+*  Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
+*  Writing data into memory is an explicit operation, performed by the flushBits function.
+*  Hence, keep track of how many bits are potentially stored in the local register, to avoid register overflow.
+*  After a flushBits, a maximum of 7 bits might still be stored into local register.
+*
+*  Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
+*
+*  Last operation is to close the bitStream.
+*  The function returns the final size of CStream in bytes.
+*  If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
+*/
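+
+/* Example usage (illustrative sketch only, assuming a caller-provided
+*  buffer `dst` of size `dstCapacity`) :
+*
+*      BIT_CStream_t bitC;
+*      size_t streamSize;
+*      if (ERR_isError(BIT_initCStream(&bitC, dst, dstCapacity))) { handle error }
+*      BIT_addBits(&bitC, 5, 3);              (append value 5, using 3 bits)
+*      BIT_addBits(&bitC, 1, 2);              (append value 1, using 2 bits)
+*      BIT_flushBits(&bitC);                  (write local register to dst)
+*      streamSize = BIT_closeCStream(&bitC);  (0 means dst was too small)
+*/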
+
+
+/*-********************************************
+*  bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+    const char* limitPtr;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+               BIT_DStream_endOfBuffer = 1,
+               BIT_DStream_completed = 2,
+               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/* Start by invoking BIT_initDStream().
+*  A chunk of the bitStream is then stored into a local register.
+*  Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
+*  You can then retrieve bitFields stored into the local register, **in reverse order**.
+*  Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
+*  A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
+*  Otherwise, it can be less than that, so proceed accordingly.
+*  Checking if DStream has reached its end can be performed with BIT_endOfDStream().
+*/
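+
+/* Example usage (illustrative sketch only, assuming `src` / `srcSize`
+*  describe a complete bitStream produced by the encoding API above) :
+*
+*      BIT_DStream_t bitD;
+*      if (ERR_isError(BIT_initDStream(&bitD, src, srcSize))) { handle error }
+*      do {
+*          size_t const field = BIT_readBits(&bitD, 3);  (fields come back in reverse order)
+*          ...
+*      } while (BIT_reloadDStream(&bitD) == BIT_DStream_unfinished);
+*      if (!BIT_endOfDStream(&bitD)) { stream was not fully consumed }
+*/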
+
+
+/*-****************************************
+*  unsafe API
+******************************************/
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
+/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
+
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
+/* unsafe version; does not check buffer overflow */
+
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+*  Internal functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+    assert(val != 0);
+    {
+#   if defined(_MSC_VER)   /* Visual */
+        unsigned long r=0;
+        _BitScanReverse ( &r, val );
+        return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+        return 31 - __builtin_clz (val);
+#   else   /* Software version */
+        static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
+                                                 11, 14, 16, 18, 22, 25,  3, 30,
+                                                  8, 12, 20, 28, 15, 17, 24,  7,
+                                                 19, 27, 23,  6, 26,  5,  4, 31 };
+        U32 v = val;
+        v |= v >> 1;
+        v |= v >> 2;
+        v |= v >> 4;
+        v |= v >> 8;
+        v |= v >> 16;
+        return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+#   endif
+    }
+}
+
+/*=====    Local Constants   =====*/
+static const unsigned BIT_mask[] = {
+    0,          1,         3,         7,         0xF,       0x1F,
+    0x3F,       0x7F,      0xFF,      0x1FF,     0x3FF,     0x7FF,
+    0xFFF,      0x1FFF,    0x3FFF,    0x7FFF,    0xFFFF,    0x1FFFF,
+    0x3FFFF,    0x7FFFF,   0xFFFFF,   0x1FFFFF,  0x3FFFFF,  0x7FFFFF,
+    0xFFFFFF,   0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF,
+    0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */
+#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0]))
+
+/*-**************************************************************
+*  bitStream encoding
+****************************************************************/
+/*! BIT_initCStream() :
+ *  `dstCapacity` must be > sizeof(size_t)
+ *  @return : 0 if success,
+ *            otherwise an error code (can be tested using ERR_isError()) */
+MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
+                                  void* startPtr, size_t dstCapacity)
+{
+    bitC->bitContainer = 0;
+    bitC->bitPos = 0;
+    bitC->startPtr = (char*)startPtr;
+    bitC->ptr = bitC->startPtr;
+    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
+    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
+    return 0;
+}
+
+/*! BIT_addBits() :
+ *  can add up to 31 bits into `bitC`.
+ *  Note : does not check for register overflow ! */
+MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
+                            size_t value, unsigned nbBits)
+{
+    MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32);
+    assert(nbBits < BIT_MASK_SIZE);
+    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
+    bitC->bitPos += nbBits;
+}
+
+/*! BIT_addBitsFast() :
+ *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
+MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
+                                size_t value, unsigned nbBits)
+{
+    assert((value>>nbBits) == 0);
+    assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    bitC->bitContainer |= value << bitC->bitPos;
+    bitC->bitPos += nbBits;
+}
+
+/*! BIT_flushBitsFast() :
+ *  assumption : bitContainer has not overflowed
+ *  unsafe version; does not check buffer overflow */
+MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
+{
+    size_t const nbBytes = bitC->bitPos >> 3;
+    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+    bitC->ptr += nbBytes;
+    assert(bitC->ptr <= bitC->endPtr);
+    bitC->bitPos &= 7;
+    bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_flushBits() :
+ *  assumption : bitContainer has not overflowed
+ *  safe version; checks for buffer overflow and prevents it.
+ *  note : does not signal buffer overflow.
+ *  overflow will be revealed later on using BIT_closeCStream() */
+MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
+{
+    size_t const nbBytes = bitC->bitPos >> 3;
+    assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8);
+    MEM_writeLEST(bitC->ptr, bitC->bitContainer);
+    bitC->ptr += nbBytes;
+    if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
+    bitC->bitPos &= 7;
+    bitC->bitContainer >>= nbBytes*8;
+}
+
+/*! BIT_closeCStream() :
+ *  @return : size of CStream, in bytes,
+ *            or 0 if it could not fit into dstBuffer */
+MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
+{
+    BIT_addBitsFast(bitC, 1, 1);   /* endMark */
+    BIT_flushBits(bitC);
+    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
+    return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
+}
+
+
+/*-********************************************************
+*  bitStream decoding
+**********************************************************/
+/*! BIT_initDStream() :
+ *  Initialize a BIT_DStream_t.
+ * `bitD` : a pointer to an already allocated BIT_DStream_t structure.
+ * `srcSize` must be the *exact* size of the bitStream, in bytes.
+ * @return : size of stream (== srcSize), or an errorCode if a problem is detected
+ */
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    bitD->start = (const char*)srcBuffer;
+    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
+
+    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
+          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+    } else {
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+        case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
+                /* fall-through */
+
+        case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
+                /* fall-through */
+
+        case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
+                /* fall-through */
+
+        case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
+                /* fall-through */
+
+        case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
+                /* fall-through */
+
+        case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8;
+                /* fall-through */
+
+        default: break;
+        }
+        {   BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+            bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
+            if (lastByte == 0) return ERROR(corruption_detected);  /* endMark not present */
+        }
+        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
+{
+    return bitContainer >> start;
+}
+
+MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
+{
+#if defined(__BMI__) && defined(__GNUC__) && __GNUC__*1000+__GNUC_MINOR__ >= 4008  /* experimental */
+#  if defined(__x86_64__)
+    if (sizeof(bitContainer)==8)
+        return _bextr_u64(bitContainer, start, nbBits);
+    else
+#  endif
+        return _bextr_u32(bitContainer, start, nbBits);
+#else
+    assert(nbBits < BIT_MASK_SIZE);
+    return (bitContainer >> start) & BIT_mask[nbBits];
+#endif
+}
+
+MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
+{
+    assert(nbBits < BIT_MASK_SIZE);
+    return bitContainer & BIT_mask[nbBits];
+}
+
+/*! BIT_lookBits() :
+ *  Provides next n bits from the local register.
+ *  The local register is not modified.
+ *  On 32-bits, maxNbBits==24.
+ *  On 64-bits, maxNbBits==56.
+ * @return : value extracted */
+MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
+{
+#if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
+    return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
+#else
+    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
+#endif
+}
+
+/*! BIT_lookBitsFast() :
+ *  unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
+{
+    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
+    assert(nbBits >= 1);
+    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+/*! BIT_readBits() :
+ *  Reads (consumes) the next n bits from the local register and updates it.
+ *  Take care not to read more bits than the local register currently contains.
+ * @return : extracted value. */
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BIT_lookBits(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*! BIT_readBitsFast() :
+ *  unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BIT_lookBitsFast(bitD, nbBits);
+    assert(nbBits >= 1);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*! BIT_reloadDStream() :
+ *  Refill `bitD` from buffer previously set in BIT_initDStream() .
+ *  This function is safe, it guarantees it will not read beyond src buffer.
+ * @return : status of `BIT_DStream_t` internal register.
+ *           when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
+        return BIT_DStream_overflow;
+
+    if (bitD->ptr >= bitD->limitPtr) {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BIT_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start) {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+        return BIT_DStream_completed;
+    }
+    /* start < ptr < limitPtr */
+    {   U32 nbBytes = bitD->bitsConsumed >> 3;
+        BIT_DStream_status result = BIT_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start) {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BIT_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
+        return result;
+    }
+}
+
+/*! BIT_endOfDStream() :
+ * @return : 1 if DStream has _exactly_ reached its end (all bits consumed).
+ */
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/compiler.h b/vendor/github.com/DataDog/zstd/compiler.h
new file mode 100644
index 0000000..e90a3bc
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/compiler.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMPILER_H
+#define ZSTD_COMPILER_H
+
+/*-*******************************************************
+*  Compiler specifics
+*********************************************************/
+/* force inlining */
+#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#  define INLINE_KEYWORD inline
+#else
+#  define INLINE_KEYWORD
+#endif
+
+#if defined(__GNUC__)
+#  define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+#  define FORCE_INLINE_ATTR __forceinline
+#else
+#  define FORCE_INLINE_ATTR
+#endif
+
+/**
+ * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
+ * parameters. They must be inlined for the compiler to elimininate the constant
+ * branches.
+ */
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+/**
+ * HINT_INLINE is used to help the compiler generate better code. It is *not*
+ * used for "templates", so it can be tweaked based on the compilers
+ * performance.
+ *
+ * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
+ * always_inline attribute.
+ *
+ * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline
+ * attribute.
+ */
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
+#  define HINT_INLINE static INLINE_KEYWORD
+#else
+#  define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
+#endif
+
+/* force no inlining */
+#ifdef _MSC_VER
+#  define FORCE_NOINLINE static __declspec(noinline)
+#else
+#  ifdef __GNUC__
+#    define FORCE_NOINLINE static __attribute__((__noinline__))
+#  else
+#    define FORCE_NOINLINE static
+#  endif
+#endif
+
+/* target attribute */
+#ifndef __has_attribute
+  #define __has_attribute(x) 0  /* Compatibility with non-clang compilers. */
+#endif
+#if defined(__GNUC__)
+#  define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
+#else
+#  define TARGET_ATTRIBUTE(target)
+#endif
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+  #if (defined(__clang__) && __has_attribute(__target__)) \
+      || (defined(__GNUC__) \
+          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) \
+      && (defined(__x86_64__) || defined(_M_X86)) \
+      && !defined(__BMI2__)
+  #  define DYNAMIC_BMI2 1
+  #else
+  #  define DYNAMIC_BMI2 0
+  #endif
+#endif
+
+/* prefetch */
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86))  /* _mm_prefetch() is not defined outside of x86/x64 */
+#  include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
+#  define PREFETCH(ptr)   _mm_prefetch((const char*)ptr, _MM_HINT_T0)
+#elif defined(__GNUC__)
+#  define PREFETCH(ptr)   __builtin_prefetch(ptr, 0, 0)
+#else
+#  define PREFETCH(ptr)   /* disabled */
+#endif
+
+/* disable warnings */
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#endif
+
+#endif /* ZSTD_COMPILER_H */
diff --git a/vendor/github.com/DataDog/zstd/cover.c b/vendor/github.com/DataDog/zstd/cover.c
new file mode 100644
index 0000000..b5a3957
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/cover.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* *****************************************************************************
+ * Constructs a dictionary using a heuristic based on the following paper:
+ *
+ * Liao, Petri, Moffat, Wirth
+ * Effective Construction of Relative Lempel-Ziv Dictionaries
+ * Published in WWW 2016.
+ *
+ * Adapted from code originally written by @ot (Giuseppe Ottaviano).
+ ******************************************************************************/
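+
+/* Example usage of the public entry point defined in this file
+ * (illustrative sketch; the sample and dictionary buffers are assumed
+ * to be prepared by the caller) :
+ *
+ *     ZDICT_cover_params_t params;
+ *     memset(&params, 0, sizeof(params));
+ *     params.k = 1024;   (segment size; k and d are required parameters)
+ *     params.d = 8;      (dmer size; must satisfy d <= k)
+ *     {   size_t const dictSize = ZDICT_trainFromBuffer_cover(
+ *             dictBuffer, dictBufferCapacity,
+ *             samplesBuffer, samplesSizes, nbSamples, params);
+ *         if (ZDICT_isError(dictSize)) { handle error }
+ *     }
+ */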
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+
+#include "mem.h" /* read */
+#include "pool.h"
+#include "threading.h"
+#include "zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+/*-*************************************
+*  Constants
+***************************************/
+#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
+
+/*-*************************************
+*  Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...)                                                           \
+  {                                                                            \
+    fprintf(stderr, __VA_ARGS__);                                              \
+    fflush(stderr);                                                            \
+  }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
+  if (displayLevel >= l) {                                                     \
+    DISPLAY(__VA_ARGS__);                                                      \
+  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
+
+#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
+  if (displayLevel >= l) {                                                     \
+    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
+      g_time = clock();                                                        \
+      DISPLAY(__VA_ARGS__);                                                    \
+    }                                                                          \
+  }
+#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
+static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
+static clock_t g_time = 0;
+
+/*-*************************************
+* Hash table
+***************************************
+* A small specialized hash map for storing activeDmers.
+* The map does not resize, so if it becomes full it will loop forever.
+* Thus, the map must be large enough to store every value.
+* The map implements linear probing and keeps its load less than 0.5.
+*/
+
+#define MAP_EMPTY_VALUE ((U32)-1)
+typedef struct COVER_map_pair_t_s {
+  U32 key;
+  U32 value;
+} COVER_map_pair_t;
+
+typedef struct COVER_map_s {
+  COVER_map_pair_t *data;
+  U32 sizeLog;
+  U32 size;
+  U32 sizeMask;
+} COVER_map_t;
+
+/**
+ * Clear the map.
+ */
+static void COVER_map_clear(COVER_map_t *map) {
+  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
+}
+
+/**
+ * Initializes a map of the given size.
+ * Returns 1 on success and 0 on failure.
+ * The map must be destroyed with COVER_map_destroy().
+ * The map is only guaranteed to be large enough to hold size elements.
+ */
+static int COVER_map_init(COVER_map_t *map, U32 size) {
+  map->sizeLog = ZSTD_highbit32(size) + 2;
+  map->size = (U32)1 << map->sizeLog;
+  map->sizeMask = map->size - 1;
+  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
+  if (!map->data) {
+    map->sizeLog = 0;
+    map->size = 0;
+    return 0;
+  }
+  COVER_map_clear(map);
+  return 1;
+}
+
+/**
+ * Internal hash function
+ */
+static const U32 prime4bytes = 2654435761U;
+static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
+  return (key * prime4bytes) >> (32 - map->sizeLog);
+}
+
+/**
+ * Helper function that returns the index that a key should be placed into.
+ */
+static U32 COVER_map_index(COVER_map_t *map, U32 key) {
+  const U32 hash = COVER_map_hash(map, key);
+  U32 i;
+  for (i = hash;; i = (i + 1) & map->sizeMask) {
+    COVER_map_pair_t *pos = &map->data[i];
+    if (pos->value == MAP_EMPTY_VALUE) {
+      return i;
+    }
+    if (pos->key == key) {
+      return i;
+    }
+  }
+}
+
+/**
+ * Returns the pointer to the value for key.
+ * If key is not in the map, it is inserted and the value is set to 0.
+ * The map must not be full.
+ */
+static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
+  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
+  if (pos->value == MAP_EMPTY_VALUE) {
+    pos->key = key;
+    pos->value = 0;
+  }
+  return &pos->value;
+}
+
+/**
+ * Deletes key from the map if present.
+ */
+static void COVER_map_remove(COVER_map_t *map, U32 key) {
+  U32 i = COVER_map_index(map, key);
+  COVER_map_pair_t *del = &map->data[i];
+  U32 shift = 1;
+  if (del->value == MAP_EMPTY_VALUE) {
+    return;
+  }
+  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
+    COVER_map_pair_t *const pos = &map->data[i];
+    /* If the position is empty we are done */
+    if (pos->value == MAP_EMPTY_VALUE) {
+      del->value = MAP_EMPTY_VALUE;
+      return;
+    }
+    /* If pos can be moved to del do so */
+    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
+      del->key = pos->key;
+      del->value = pos->value;
+      del = pos;
+      shift = 1;
+    } else {
+      ++shift;
+    }
+  }
+}
+
+/**
+ * Destroys a map that was initialized with COVER_map_init().
+ */
+static void COVER_map_destroy(COVER_map_t *map) {
+  if (map->data) {
+    free(map->data);
+  }
+  map->data = NULL;
+  map->size = 0;
+}
+
+/*-*************************************
+* Context
+***************************************/
+
+typedef struct {
+  const BYTE *samples;
+  size_t *offsets;
+  const size_t *samplesSizes;
+  size_t nbSamples;
+  U32 *suffix;
+  size_t suffixSize;
+  U32 *freqs;
+  U32 *dmerAt;
+  unsigned d;
+} COVER_ctx_t;
+
+/* We need a global context for qsort... */
+static COVER_ctx_t *g_ctx = NULL;
+
+/*-*************************************
+*  Helper functions
+***************************************/
+
+/**
+ * Returns the sum of the sample sizes.
+ */
+static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
+  size_t sum = 0;
+  size_t i;
+  for (i = 0; i < nbSamples; ++i) {
+    sum += samplesSizes[i];
+  }
+  return sum;
+}
+
+/**
+ * Returns -1 if the dmer at lp is less than the dmer at rp.
+ * Returns 0 if the dmers at lp and rp are equal.
+ * Returns 1 if the dmer at lp is greater than the dmer at rp.
+ */
+static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+  U32 const lhs = *(U32 const *)lp;
+  U32 const rhs = *(U32 const *)rp;
+  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
+}
+/**
+ * Faster version for d <= 8.
+ */
+static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
+  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
+  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
+  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
+  if (lhs < rhs) {
+    return -1;
+  }
+  return (lhs > rhs);
+}
+
+/**
+ * Same as COVER_cmp() except ties are broken by pointer value
+ * NOTE: g_ctx must be set to call this function.  A global is required because
+ * qsort doesn't take an opaque pointer.
+ */
+static int COVER_strict_cmp(const void *lp, const void *rp) {
+  int result = COVER_cmp(g_ctx, lp, rp);
+  if (result == 0) {
+    result = lp < rp ? -1 : 1;
+  }
+  return result;
+}
+/**
+ * Faster version for d <= 8.
+ */
+static int COVER_strict_cmp8(const void *lp, const void *rp) {
+  int result = COVER_cmp8(g_ctx, lp, rp);
+  if (result == 0) {
+    result = lp < rp ? -1 : 1;
+  }
+  return result;
+}
+
+/**
+ * Returns the first pointer in [first, last) whose element does not compare
+ * less than value.  If no such element exists it returns last.
+ */
+static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
+                                       size_t value) {
+  size_t count = last - first;
+  while (count != 0) {
+    size_t step = count / 2;
+    const size_t *ptr = first;
+    ptr += step;
+    if (*ptr < value) {
+      first = ++ptr;
+      count -= step + 1;
+    } else {
+      count = step;
+    }
+  }
+  return first;
+}
+
+/**
+ * Generic groupBy function.
+ * Groups an array sorted by cmp into groups with equivalent values.
+ * Calls grp for each group.
+ */
+static void
+COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
+              int (*cmp)(COVER_ctx_t *, const void *, const void *),
+              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
+  const BYTE *ptr = (const BYTE *)data;
+  size_t num = 0;
+  while (num < count) {
+    const BYTE *grpEnd = ptr + size;
+    ++num;
+    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
+      grpEnd += size;
+      ++num;
+    }
+    grp(ctx, ptr, grpEnd);
+    ptr = grpEnd;
+  }
+}
+
+/*-*************************************
+*  Cover functions
+***************************************/
+
+/**
+ * Called on each group of positions with the same dmer.
+ * Counts the frequency of each dmer and saves it in the suffix array.
+ * Fills `ctx->dmerAt`.
+ */
+static void COVER_group(COVER_ctx_t *ctx, const void *group,
+                        const void *groupEnd) {
+  /* The group consists of all the positions with the same first d bytes. */
+  const U32 *grpPtr = (const U32 *)group;
+  const U32 *grpEnd = (const U32 *)groupEnd;
+  /* The dmerId is how we will reference this dmer.
+   * This allows us to map the whole dmer space to a much smaller space, the
+   * size of the suffix array.
+   */
+  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
+  /* Count the number of samples this dmer shows up in */
+  U32 freq = 0;
+  /* Details */
+  const size_t *curOffsetPtr = ctx->offsets;
+  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
+  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
+   * different sample than the last.
+   */
+  size_t curSampleEnd = ctx->offsets[0];
+  for (; grpPtr != grpEnd; ++grpPtr) {
+    /* Save the dmerId for this position so we can get back to it. */
+    ctx->dmerAt[*grpPtr] = dmerId;
+    /* Dictionaries only help for the first reference to the dmer.
+     * After that zstd can reference the match from the previous reference.
+     * So only count each dmer once for each sample it is in.
+     */
+    if (*grpPtr < curSampleEnd) {
+      continue;
+    }
+    freq += 1;
+    /* Binary search to find the end of the sample *grpPtr is in.
+     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
+     * search because the loop is over.
+     */
+    if (grpPtr + 1 != grpEnd) {
+      const size_t *sampleEndPtr =
+          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
+      curSampleEnd = *sampleEndPtr;
+      curOffsetPtr = sampleEndPtr + 1;
+    }
+  }
+  /* At this point we are never going to look at this segment of the suffix
+   * array again.  We take advantage of this fact to save memory.
+   * We store the frequency of the dmer in the first position of the group,
+   * which is dmerId.
+   */
+  ctx->suffix[dmerId] = freq;
+}
+
+/**
+ * A segment is a range in the source as well as the score of the segment.
+ */
+typedef struct {
+  U32 begin;
+  U32 end;
+  U32 score;
+} COVER_segment_t;
+
+/**
+ * Selects the best segment in an epoch.
+ * Segments are scored according to the function:
+ *
+ * Let F(d) be the frequency of dmer d.
+ * Let S_i be the dmer at position i of segment S which has length k.
+ *
+ *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
+ *
+ * Once the dmer d is in the dictionary we set F(d) = 0.
+ */
+static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
+                                           COVER_map_t *activeDmers, U32 begin,
+                                           U32 end,
+                                           ZDICT_cover_params_t parameters) {
+  /* Constants */
+  const U32 k = parameters.k;
+  const U32 d = parameters.d;
+  const U32 dmersInK = k - d + 1;
+  /* Try each segment (activeSegment) and save the best (bestSegment) */
+  COVER_segment_t bestSegment = {0, 0, 0};
+  COVER_segment_t activeSegment;
+  /* Reset the activeDmers in the segment */
+  COVER_map_clear(activeDmers);
+  /* The activeSegment starts at the beginning of the epoch. */
+  activeSegment.begin = begin;
+  activeSegment.end = begin;
+  activeSegment.score = 0;
+  /* Slide the activeSegment through the whole epoch.
+   * Save the best segment in bestSegment.
+   */
+  while (activeSegment.end < end) {
+    /* The dmerId for the dmer at the next position */
+    U32 newDmer = ctx->dmerAt[activeSegment.end];
+    /* The entry in activeDmers for this dmerId */
+    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
+    /* If the dmer isn't already present in the segment add its score. */
+    if (*newDmerOcc == 0) {
+      /* The paper suggests using the L-0.5 norm, but experiments show that it
+       * doesn't help.
+       */
+      activeSegment.score += freqs[newDmer];
+    }
+    /* Add the dmer to the segment */
+    activeSegment.end += 1;
+    *newDmerOcc += 1;
+
+    /* If the window is now too large, drop the first position */
+    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
+      U32 delDmer = ctx->dmerAt[activeSegment.begin];
+      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
+      activeSegment.begin += 1;
+      *delDmerOcc -= 1;
+      /* If this is the last occurrence of the dmer, subtract its score */
+      if (*delDmerOcc == 0) {
+        COVER_map_remove(activeDmers, delDmer);
+        activeSegment.score -= freqs[delDmer];
+      }
+    }
+
+    /* If this segment is the best so far save it */
+    if (activeSegment.score > bestSegment.score) {
+      bestSegment = activeSegment;
+    }
+  }
+  {
+    /* Trim off the zero frequency head and tail from the segment. */
+    U32 newBegin = bestSegment.end;
+    U32 newEnd = bestSegment.begin;
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      U32 freq = freqs[ctx->dmerAt[pos]];
+      if (freq != 0) {
+        newBegin = MIN(newBegin, pos);
+        newEnd = pos + 1;
+      }
+    }
+    bestSegment.begin = newBegin;
+    bestSegment.end = newEnd;
+  }
+  {
+    /* Zero out the frequency of each dmer covered by the chosen segment. */
+    U32 pos;
+    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
+      freqs[ctx->dmerAt[pos]] = 0;
+    }
+  }
+  return bestSegment;
+}
+
+/**
+ * Check the validity of the parameters.
+ * Returns non-zero if the parameters are valid and 0 otherwise.
+ */
+static int COVER_checkParameters(ZDICT_cover_params_t parameters,
+                                 size_t maxDictSize) {
+  /* k and d are required parameters */
+  if (parameters.d == 0 || parameters.k == 0) {
+    return 0;
+  }
+  /* k <= maxDictSize */
+  if (parameters.k > maxDictSize) {
+    return 0;
+  }
+  /* d <= k */
+  if (parameters.d > parameters.k) {
+    return 0;
+  }
+  return 1;
+}
+
+/**
+ * Clean up a context initialized with `COVER_ctx_init()`.
+ */
+static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
+  if (!ctx) {
+    return;
+  }
+  if (ctx->suffix) {
+    free(ctx->suffix);
+    ctx->suffix = NULL;
+  }
+  if (ctx->freqs) {
+    free(ctx->freqs);
+    ctx->freqs = NULL;
+  }
+  if (ctx->dmerAt) {
+    free(ctx->dmerAt);
+    ctx->dmerAt = NULL;
+  }
+  if (ctx->offsets) {
+    free(ctx->offsets);
+    ctx->offsets = NULL;
+  }
+}
+
+/**
+ * Prepare a context for dictionary building.
+ * The context is only dependent on the parameter `d` and can be used multiple
+ * times.
+ * Returns 1 on success or zero on error.
+ * The context must be destroyed with `COVER_ctx_destroy()`.
+ */
+static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
+                          const size_t *samplesSizes, unsigned nbSamples,
+                          unsigned d) {
+  const BYTE *const samples = (const BYTE *)samplesBuffer;
+  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
+  /* Checks */
+  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
+      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
+    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
+                 (U32)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
+    return 0;
+  }
+  /* Zero the context */
+  memset(ctx, 0, sizeof(*ctx));
+  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
+               (U32)totalSamplesSize);
+  ctx->samples = samples;
+  ctx->samplesSizes = samplesSizes;
+  ctx->nbSamples = nbSamples;
+  /* Partial suffix array */
+  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
+  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  /* Maps index to the dmerID */
+  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  /* The offsets of each file */
+  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
+  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
+    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
+    COVER_ctx_destroy(ctx);
+    return 0;
+  }
+  ctx->freqs = NULL;
+  ctx->d = d;
+
+  /* Fill offsets from the samplesSizes */
+  {
+    U32 i;
+    ctx->offsets[0] = 0;
+    for (i = 1; i <= nbSamples; ++i) {
+      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
+    }
+  }
+  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
+  {
+    /* suffix is a partial suffix array.
+     * It only sorts suffixes by their first parameters.d bytes.
+     * The sort is stable, so each dmer group is sorted by position in input.
+     */
+    U32 i;
+    for (i = 0; i < ctx->suffixSize; ++i) {
+      ctx->suffix[i] = i;
+    }
+    /* qsort doesn't take an opaque pointer, so pass as a global */
+    g_ctx = ctx;
+    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
+          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
+  }
+  DISPLAYLEVEL(2, "Computing frequencies\n");
+  /* For each dmer group (group of positions with the same first d bytes):
+   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
+   *    (groupBeginPtr - suffix).  This allows us to go from position to
+   *    dmerID so we can look up values in freq.
+   * 2. We calculate how many samples the dmer occurs in and save it in
+   *    freqs[dmerId].
+   */
+  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
+                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
+  ctx->freqs = ctx->suffix;
+  ctx->suffix = NULL;
+  return 1;
+}
+
+/**
+ * Given the prepared context build the dictionary.
+ */
+static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
+                                    COVER_map_t *activeDmers, void *dictBuffer,
+                                    size_t dictBufferCapacity,
+                                    ZDICT_cover_params_t parameters) {
+  BYTE *const dict = (BYTE *)dictBuffer;
+  size_t tail = dictBufferCapacity;
+  /* Divide the data up into epochs of equal size.
+   * We will select at least one segment from each epoch.
+   */
+  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
+  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
+  size_t epoch;
+  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
+               epochSize);
+  /* Loop through the epochs until there are no more segments or the dictionary
+   * is full.
+   */
+  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
+    const U32 epochBegin = (U32)(epoch * epochSize);
+    const U32 epochEnd = epochBegin + epochSize;
+    size_t segmentSize;
+    /* Select a segment */
+    COVER_segment_t segment = COVER_selectSegment(
+        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
+    /* If the segment covers no dmers, then we are out of content */
+    if (segment.score == 0) {
+      break;
+    }
+    /* Trim the segment if necessary and if it is too small then we are done */
+    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
+    if (segmentSize < parameters.d) {
+      break;
+    }
+    /* We fill the dictionary from the back to allow the best segments to be
+     * referenced with the smallest offsets.
+     */
+    tail -= segmentSize;
+    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
+    DISPLAYUPDATE(
+        2, "\r%u%%       ",
+        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
+  }
+  DISPLAYLEVEL(2, "\r%79s\r", "");
+  return tail;
+}
+
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
+    void *dictBuffer, size_t dictBufferCapacity,
+    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+    ZDICT_cover_params_t parameters)
+{
+  BYTE* const dict = (BYTE*)dictBuffer;
+  COVER_ctx_t ctx;
+  COVER_map_t activeDmers;
+
+  /* Initialize global data */
+  g_displayLevel = parameters.zParams.notificationLevel;
+  /* Checks */
+  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
+    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  /* Initialize context and activeDmers */
+  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
+                      parameters.d)) {
+    return ERROR(GENERIC);
+  }
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    COVER_ctx_destroy(&ctx);
+    return ERROR(GENERIC);
+  }
+
+  DISPLAYLEVEL(2, "Building dictionary\n");
+  {
+    const size_t tail =
+        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
+                              dictBufferCapacity, parameters);
+    const size_t dictionarySize = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
+    if (!ZSTD_isError(dictionarySize)) {
+      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
+                   (U32)dictionarySize);
+    }
+    COVER_ctx_destroy(&ctx);
+    COVER_map_destroy(&activeDmers);
+    return dictionarySize;
+  }
+}
+
+/**
+ * COVER_best_t is used for two purposes:
+ * 1. Synchronizing threads.
+ * 2. Saving the best parameters and dictionary.
+ *
+ * All of the methods except COVER_best_init() are thread safe if zstd is
+ * compiled with multithreaded support.
+ */
+typedef struct COVER_best_s {
+  ZSTD_pthread_mutex_t mutex;
+  ZSTD_pthread_cond_t cond;
+  size_t liveJobs;
+  void *dict;
+  size_t dictSize;
+  ZDICT_cover_params_t parameters;
+  size_t compressedSize;
+} COVER_best_t;
+
+/**
+ * Initialize the `COVER_best_t`.
+ */
+static void COVER_best_init(COVER_best_t *best) {
+  if (best==NULL) return; /* compatible with init on NULL */
+  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
+  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
+  best->liveJobs = 0;
+  best->dict = NULL;
+  best->dictSize = 0;
+  best->compressedSize = (size_t)-1;
+  memset(&best->parameters, 0, sizeof(best->parameters));
+}
+
+/**
+ * Wait until liveJobs == 0.
+ */
+static void COVER_best_wait(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  ZSTD_pthread_mutex_lock(&best->mutex);
+  while (best->liveJobs != 0) {
+    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
+  }
+  ZSTD_pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Call COVER_best_wait() and then destroy the COVER_best_t.
+ */
+static void COVER_best_destroy(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  COVER_best_wait(best);
+  if (best->dict) {
+    free(best->dict);
+  }
+  ZSTD_pthread_mutex_destroy(&best->mutex);
+  ZSTD_pthread_cond_destroy(&best->cond);
+}
+
+/**
+ * Called when a thread is about to be launched.
+ * Increments liveJobs.
+ */
+static void COVER_best_start(COVER_best_t *best) {
+  if (!best) {
+    return;
+  }
+  ZSTD_pthread_mutex_lock(&best->mutex);
+  ++best->liveJobs;
+  ZSTD_pthread_mutex_unlock(&best->mutex);
+}
+
+/**
+ * Called when a thread finishes executing, whether on error or success.
+ * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
+ * If this dictionary is the best so far, save it and its parameters.
+ */
+static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
+                              ZDICT_cover_params_t parameters, void *dict,
+                              size_t dictSize) {
+  if (!best) {
+    return;
+  }
+  {
+    size_t liveJobs;
+    ZSTD_pthread_mutex_lock(&best->mutex);
+    --best->liveJobs;
+    liveJobs = best->liveJobs;
+    /* If the new dictionary is better */
+    if (compressedSize < best->compressedSize) {
+      /* Allocate space if necessary */
+      if (!best->dict || best->dictSize < dictSize) {
+        if (best->dict) {
+          free(best->dict);
+        }
+        best->dict = malloc(dictSize);
+        if (!best->dict) {
+          best->compressedSize = ERROR(GENERIC);
+          best->dictSize = 0;
+          /* Unlock and wake waiters before returning; bailing out with
+           * the mutex held would deadlock COVER_best_wait(). */
+          ZSTD_pthread_mutex_unlock(&best->mutex);
+          if (liveJobs == 0) {
+            ZSTD_pthread_cond_broadcast(&best->cond);
+          }
+          return;
+        }
+      }
+      /* Save the dictionary, parameters, and size */
+      memcpy(best->dict, dict, dictSize);
+      best->dictSize = dictSize;
+      best->parameters = parameters;
+      best->compressedSize = compressedSize;
+    }
+    ZSTD_pthread_mutex_unlock(&best->mutex);
+    if (liveJobs == 0) {
+      ZSTD_pthread_cond_broadcast(&best->cond);
+    }
+  }
+}
+
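+/*
+ * Calling protocol sketch (editorial): the optimizer below pairs one
+ * COVER_best_start() with exactly one COVER_best_finish() per candidate
+ * parameter set, then blocks in COVER_best_wait() until liveJobs drops
+ * to zero:
+ *
+ *   COVER_best_start(&best);                    before launching a job
+ *   POOL_add(pool, COVER_tryParameters, data);  the job ends in _finish()
+ *   ...
+ *   COVER_best_wait(&best);                     best now holds the winner
+ */
+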
+/**
+ * Parameters for COVER_tryParameters().
+ */
+typedef struct COVER_tryParameters_data_s {
+  const COVER_ctx_t *ctx;
+  COVER_best_t *best;
+  size_t dictBufferCapacity;
+  ZDICT_cover_params_t parameters;
+} COVER_tryParameters_data_t;
+
+/**
+ * Tries a set of parameters and updates the COVER_best_t with the results.
+ * This function is thread safe if zstd is compiled with multithreaded support.
+ * It takes its parameters as an *OWNING* opaque pointer to support threading.
+ */
+static void COVER_tryParameters(void *opaque) {
+  /* Save parameters as local variables */
+  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
+  const COVER_ctx_t *const ctx = data->ctx;
+  const ZDICT_cover_params_t parameters = data->parameters;
+  size_t dictBufferCapacity = data->dictBufferCapacity;
+  size_t totalCompressedSize = ERROR(GENERIC);
+  /* Allocate space for hash table, dict, and freqs */
+  COVER_map_t activeDmers;
+  BYTE *const dict = (BYTE *)malloc(dictBufferCapacity);
+  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
+  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
+    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
+    goto _cleanup;
+  }
+  if (!dict || !freqs) {
+    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
+    goto _cleanup;
+  }
+  /* Copy the frequencies because we need to modify them */
+  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
+  /* Build the dictionary */
+  {
+    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
+                                              dictBufferCapacity, parameters);
+    dictBufferCapacity = ZDICT_finalizeDictionary(
+        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
+        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
+        parameters.zParams);
+    if (ZDICT_isError(dictBufferCapacity)) {
+      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
+      goto _cleanup;
+    }
+  }
+  /* Check total compressed size */
+  {
+    /* Pointers */
+    ZSTD_CCtx *cctx;
+    ZSTD_CDict *cdict;
+    void *dst;
+    /* Local variables */
+    size_t dstCapacity;
+    size_t i;
+    /* Allocate dst with enough space to compress the maximum sized sample */
+    {
+      size_t maxSampleSize = 0;
+      for (i = 0; i < ctx->nbSamples; ++i) {
+        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
+      }
+      dstCapacity = ZSTD_compressBound(maxSampleSize);
+      dst = malloc(dstCapacity);
+    }
+    /* Create the cctx and cdict */
+    cctx = ZSTD_createCCtx();
+    cdict = ZSTD_createCDict(dict, dictBufferCapacity,
+                             parameters.zParams.compressionLevel);
+    if (!dst || !cctx || !cdict) {
+      goto _compressCleanup;
+    }
+    /* Compress each sample and sum their sizes (or error) */
+    totalCompressedSize = dictBufferCapacity;
+    for (i = 0; i < ctx->nbSamples; ++i) {
+      const size_t size = ZSTD_compress_usingCDict(
+          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
+          ctx->samplesSizes[i], cdict);
+      if (ZSTD_isError(size)) {
+        totalCompressedSize = ERROR(GENERIC);
+        goto _compressCleanup;
+      }
+      totalCompressedSize += size;
+    }
+  _compressCleanup:
+    ZSTD_freeCCtx(cctx);
+    ZSTD_freeCDict(cdict);
+    if (dst) {
+      free(dst);
+    }
+  }
+
+_cleanup:
+  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
+                    dictBufferCapacity);
+  free(data);
+  COVER_map_destroy(&activeDmers);
+  if (dict) {
+    free(dict);
+  }
+  if (freqs) {
+    free(freqs);
+  }
+}
+
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
+    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
+    const size_t *samplesSizes, unsigned nbSamples,
+    ZDICT_cover_params_t *parameters) {
+  /* constants */
+  const unsigned nbThreads = parameters->nbThreads;
+  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
+  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
+  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
+  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
+  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
+  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
+  const unsigned kIterations =
+      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
+  /* Local variables */
+  const int displayLevel = parameters->zParams.notificationLevel;
+  unsigned iteration = 1;
+  unsigned d;
+  unsigned k;
+  COVER_best_t best;
+  POOL_ctx *pool = NULL;
+
+  /* Checks */
+  if (kMinK < kMaxD || kMaxK < kMinK) {
+    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
+    return ERROR(GENERIC);
+  }
+  if (nbSamples == 0) {
+    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
+    return ERROR(GENERIC);
+  }
+  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
+    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
+                 ZDICT_DICTSIZE_MIN);
+    return ERROR(dstSize_tooSmall);
+  }
+  if (nbThreads > 1) {
+    pool = POOL_create(nbThreads, 1);
+    if (!pool) {
+      return ERROR(memory_allocation);
+    }
+  }
+  /* Initialization */
+  COVER_best_init(&best);
+  /* Turn down global display level to clean up display at level 2 and below */
+  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
+  /* Loop through d first because each new value needs a new context */
+  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
+                    kIterations);
+  for (d = kMinD; d <= kMaxD; d += 2) {
+    /* Initialize the context for this value of d */
+    COVER_ctx_t ctx;
+    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
+    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
+      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
+      COVER_best_destroy(&best);
+      POOL_free(pool);
+      return ERROR(GENERIC);
+    }
+    /* Loop through k reusing the same context */
+    for (k = kMinK; k <= kMaxK; k += kStepSize) {
+      /* Prepare the arguments */
+      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
+          sizeof(COVER_tryParameters_data_t));
+      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
+      if (!data) {
+        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
+        COVER_best_destroy(&best);
+        COVER_ctx_destroy(&ctx);
+        POOL_free(pool);
+        return ERROR(GENERIC);
+      }
+      data->ctx = &ctx;
+      data->best = &best;
+      data->dictBufferCapacity = dictBufferCapacity;
+      data->parameters = *parameters;
+      data->parameters.k = k;
+      data->parameters.d = d;
+      data->parameters.steps = kSteps;
+      data->parameters.zParams.notificationLevel = g_displayLevel;
+      /* Check the parameters */
+      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
+        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
+        free(data);
+        continue;
+      }
+      /* Call the function and pass ownership of data to it */
+      COVER_best_start(&best);
+      if (pool) {
+        POOL_add(pool, &COVER_tryParameters, data);
+      } else {
+        COVER_tryParameters(data);
+      }
+      /* Print status */
+      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
+                         (U32)((iteration * 100) / kIterations));
+      ++iteration;
+    }
+    COVER_best_wait(&best);
+    COVER_ctx_destroy(&ctx);
+  }
+  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
+  /* Fill the output buffer and parameters with the results from the best parameter set */
+  {
+    const size_t dictSize = best.dictSize;
+    if (ZSTD_isError(best.compressedSize)) {
+      const size_t compressedSize = best.compressedSize;
+      COVER_best_destroy(&best);
+      POOL_free(pool);
+      return compressedSize;
+    }
+    *parameters = best.parameters;
+    memcpy(dictBuffer, best.dict, dictSize);
+    COVER_best_destroy(&best);
+    POOL_free(pool);
+    return dictSize;
+  }
+}
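+
+/*
+ * Editorial usage sketch, kept under #if 0 so the vendored build is
+ * unchanged: letting the optimizer search k and d. Leaving k, d and
+ * steps at zero selects the default grid above (d in {6, 8}, k in
+ * [50, 2000], 40 steps); on success *parameters is overwritten with the
+ * winning values. nbThreads = 4 is an illustrative assumption.
+ */
+#if 0
+static size_t example_optimize(void *dictBuffer, size_t dictCapacity,
+                               const void *samples, const size_t *sampleSizes,
+                               unsigned nbSamples) {
+  ZDICT_cover_params_t params;
+  memset(&params, 0, sizeof(params));
+  params.nbThreads = 4; /* tryParameters jobs run on a 4-thread POOL */
+  return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictCapacity,
+                                             samples, sampleSizes, nbSamples,
+                                             &params);
+}
+#endif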
diff --git a/vendor/github.com/DataDog/zstd/cpu.h b/vendor/github.com/DataDog/zstd/cpu.h
new file mode 100755
index 0000000..4eb48e3
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/cpu.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2018-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/**
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include <string.h>
+
+#include "mem.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+typedef struct {
+    U32 f1c;
+    U32 f1d;
+    U32 f7b;
+    U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+    U32 f1c = 0;
+    U32 f1d = 0;
+    U32 f7b = 0;
+    U32 f7c = 0;
+#ifdef _MSC_VER
+    int reg[4];
+    __cpuid((int*)reg, 0);
+    {
+        int const n = reg[0];
+        if (n >= 1) {
+            __cpuid((int*)reg, 1);
+            f1c = (U32)reg[2];
+            f1d = (U32)reg[3];
+        }
+        if (n >= 7) {
+            __cpuidex((int*)reg, 7, 0);
+            f7b = (U32)reg[1];
+            f7c = (U32)reg[2];
+        }
+    }
+#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+    /* The following block is like the normal cpuid branch below, but gcc
+     * reserves ebx for use as its PIC register, so we must specially
+     * handle the save and restore to avoid clobbering that register.
+     */
+    U32 n;
+    __asm__(
+        "pushl %%ebx\n\t"
+        "cpuid\n\t"
+        "popl %%ebx\n\t"
+        : "=a"(n)
+        : "a"(0)
+        : "ecx", "edx");
+    if (n >= 1) {
+      U32 f1a;
+      __asm__(
+          "pushl %%ebx\n\t"
+          "cpuid\n\t"
+          "popl %%ebx\n\t"
+          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+          : "a"(1)
+          :);
+    }
+    if (n >= 7) {
+      __asm__(
+          "pushl %%ebx\n\t"
+          "cpuid\n\t"
+          "movl %%ebx, %%eax\n\r"
+          "popl %%ebx"
+          : "=a"(f7b), "=c"(f7c)
+          : "a"(7), "c"(0)
+          : "edx");
+    }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+    U32 n;
+    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+    if (n >= 1) {
+      U32 f1a;
+      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+    }
+    if (n >= 7) {
+      U32 f7a;
+      __asm__("cpuid"
+              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+              : "a"(7), "c"(0)
+              : "edx");
+    }
+#endif
+    {
+        ZSTD_cpuid_t cpuid;
+        cpuid.f1c = f1c;
+        cpuid.f1d = f1d;
+        cpuid.f7b = f7b;
+        cpuid.f7c = f7c;
+        return cpuid;
+    }
+}
+
+#define X(name, r, bit)                                                        \
+  MEM_STATIC int ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) {                 \
+    return ((cpuid.r) & (1U << bit)) != 0;                                     \
+  }
+
+/* cpuid(1): Processor Info and Feature Bits. */
+#define C(name, bit) X(name, f1c, bit)
+  C(sse3, 0)
+  C(pclmuldq, 1)
+  C(dtes64, 2)
+  C(monitor, 3)
+  C(dscpl, 4)
+  C(vmx, 5)
+  C(smx, 6)
+  C(eist, 7)
+  C(tm2, 8)
+  C(ssse3, 9)
+  C(cnxtid, 10)
+  C(fma, 12)
+  C(cx16, 13)
+  C(xtpr, 14)
+  C(pdcm, 15)
+  C(pcid, 17)
+  C(dca, 18)
+  C(sse41, 19)
+  C(sse42, 20)
+  C(x2apic, 21)
+  C(movbe, 22)
+  C(popcnt, 23)
+  C(tscdeadline, 24)
+  C(aes, 25)
+  C(xsave, 26)
+  C(osxsave, 27)
+  C(avx, 28)
+  C(f16c, 29)
+  C(rdrand, 30)
+#undef C
+#define D(name, bit) X(name, f1d, bit)
+  D(fpu, 0)
+  D(vme, 1)
+  D(de, 2)
+  D(pse, 3)
+  D(tsc, 4)
+  D(msr, 5)
+  D(pae, 6)
+  D(mce, 7)
+  D(cx8, 8)
+  D(apic, 9)
+  D(sep, 11)
+  D(mtrr, 12)
+  D(pge, 13)
+  D(mca, 14)
+  D(cmov, 15)
+  D(pat, 16)
+  D(pse36, 17)
+  D(psn, 18)
+  D(clfsh, 19)
+  D(ds, 21)
+  D(acpi, 22)
+  D(mmx, 23)
+  D(fxsr, 24)
+  D(sse, 25)
+  D(sse2, 26)
+  D(ss, 27)
+  D(htt, 28)
+  D(tm, 29)
+  D(pbe, 31)
+#undef D
+
+/* cpuid(7): Extended Features. */
+#define B(name, bit) X(name, f7b, bit)
+  B(bmi1, 3)
+  B(hle, 4)
+  B(avx2, 5)
+  B(smep, 7)
+  B(bmi2, 8)
+  B(erms, 9)
+  B(invpcid, 10)
+  B(rtm, 11)
+  B(mpx, 14)
+  B(avx512f, 16)
+  B(avx512dq, 17)
+  B(rdseed, 18)
+  B(adx, 19)
+  B(smap, 20)
+  B(avx512ifma, 21)
+  B(pcommit, 22)
+  B(clflushopt, 23)
+  B(clwb, 24)
+  B(avx512pf, 26)
+  B(avx512er, 27)
+  B(avx512cd, 28)
+  B(sha, 29)
+  B(avx512bw, 30)
+  B(avx512vl, 31)
+#undef B
+#define C(name, bit) X(name, f7c, bit)
+  C(prefetchwt1, 0)
+  C(avx512vbmi, 1)
+#undef C
+
+#undef X
+
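+/*
+ * Usage sketch (editorial): each X/B/C/D line above expands into a
+ * predicate such as ZSTD_cpuid_bmi2(). A typical dispatch looks like:
+ *
+ *   ZSTD_cpuid_t const cpuid = ZSTD_cpuid();
+ *   if (ZSTD_cpuid_bmi2(cpuid)) { use a BMI2-accelerated code path }
+ *   else                        { use the portable code path }
+ */
+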
+#endif /* ZSTD_COMMON_CPU_H */
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.c b/vendor/github.com/DataDog/zstd/divsufsort.c
new file mode 100644
index 0000000..60cceb0
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/divsufsort.c
@@ -0,0 +1,1913 @@
+/*
+ * divsufsort.c for libdivsufsort-lite
+ * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*- Compiler specifics -*/
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wshorten-64-to-32"
+#endif
+
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4244)
+#  pragma warning(disable : 4127)    /* C4127 : Condition expression is constant */
+#endif
+
+
+/*- Dependencies -*/
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "divsufsort.h"
+
+/*- Constants -*/
+#if defined(INLINE)
+# undef INLINE
+#endif
+#if !defined(INLINE)
+# define INLINE __inline
+#endif
+#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
+# undef ALPHABET_SIZE
+#endif
+#if !defined(ALPHABET_SIZE)
+# define ALPHABET_SIZE (256)
+#endif
+#define BUCKET_A_SIZE (ALPHABET_SIZE)
+#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
+#if defined(SS_INSERTIONSORT_THRESHOLD)
+# if SS_INSERTIONSORT_THRESHOLD < 1
+#  undef SS_INSERTIONSORT_THRESHOLD
+#  define SS_INSERTIONSORT_THRESHOLD (1)
+# endif
+#else
+# define SS_INSERTIONSORT_THRESHOLD (8)
+#endif
+#if defined(SS_BLOCKSIZE)
+# if SS_BLOCKSIZE < 0
+#  undef SS_BLOCKSIZE
+#  define SS_BLOCKSIZE (0)
+# elif 32768 <= SS_BLOCKSIZE
+#  undef SS_BLOCKSIZE
+#  define SS_BLOCKSIZE (32767)
+# endif
+#else
+# define SS_BLOCKSIZE (1024)
+#endif
+/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
+#if SS_BLOCKSIZE == 0
+# define SS_MISORT_STACKSIZE (96)
+#elif SS_BLOCKSIZE <= 4096
+# define SS_MISORT_STACKSIZE (16)
+#else
+# define SS_MISORT_STACKSIZE (24)
+#endif
+#define SS_SMERGE_STACKSIZE (32)
+#define TR_INSERTIONSORT_THRESHOLD (8)
+#define TR_STACKSIZE (64)
+
+
+/*- Macros -*/
+#ifndef SWAP
+# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
+#endif /* SWAP */
+#ifndef MIN
+# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
+#endif /* MIN */
+#ifndef MAX
+# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
+#endif /* MAX */
+#define STACK_PUSH(_a, _b, _c, _d)\
+  do {\
+    assert(ssize < STACK_SIZE);\
+    stack[ssize].a = (_a), stack[ssize].b = (_b),\
+    stack[ssize].c = (_c), stack[ssize++].d = (_d);\
+  } while(0)
+#define STACK_PUSH5(_a, _b, _c, _d, _e)\
+  do {\
+    assert(ssize < STACK_SIZE);\
+    stack[ssize].a = (_a), stack[ssize].b = (_b),\
+    stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
+  } while(0)
+#define STACK_POP(_a, _b, _c, _d)\
+  do {\
+    assert(0 <= ssize);\
+    if(ssize == 0) { return; }\
+    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
+    (_c) = stack[ssize].c, (_d) = stack[ssize].d;\
+  } while(0)
+#define STACK_POP5(_a, _b, _c, _d, _e)\
+  do {\
+    assert(0 <= ssize);\
+    if(ssize == 0) { return; }\
+    (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
+    (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
+  } while(0)
+#define BUCKET_A(_c0) bucket_A[(_c0)]
+#if ALPHABET_SIZE == 256
+#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
+#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
+#else
+#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
+#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
+#endif
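+
+/* Note (editorial): with ALPHABET_SIZE == 256 the B and B* counters share
+ * one array. BUCKET_BSTAR packs a character pair as (c0 << 8) | c1 and is
+ * only used with c0 < c1, while BUCKET_B packs it as (c1 << 8) | c0 and is
+ * only used with c0 <= c1, so the two occupy disjoint halves of bucket_B
+ * (strict upper triangle vs. lower triangle plus diagonal, in index terms).
+ */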
+
+
+/*- Private Functions -*/
+
+static const int lg_table[256]= {
+ -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+  5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+};
+
+#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
+
+static INLINE
+int
+ss_ilg(int n) {
+#if SS_BLOCKSIZE == 0
+  return (n & 0xffff0000) ?
+          ((n & 0xff000000) ?
+            24 + lg_table[(n >> 24) & 0xff] :
+            16 + lg_table[(n >> 16) & 0xff]) :
+          ((n & 0x0000ff00) ?
+             8 + lg_table[(n >>  8) & 0xff] :
+             0 + lg_table[(n >>  0) & 0xff]);
+#elif SS_BLOCKSIZE < 256
+  return lg_table[n];
+#else
+  return (n & 0xff00) ?
+          8 + lg_table[(n >> 8) & 0xff] :
+          0 + lg_table[(n >> 0) & 0xff];
+#endif
+}
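+
+/*
+ * Worked example (editorial): lg_table[v] is floor(log2(v)) for a single
+ * byte (lg_table[0] = -1 is a sentinel). With the default SS_BLOCKSIZE of
+ * 1024 the last branch above runs, so ss_ilg(1000) tests
+ * 1000 & 0xff00 = 0x0300 and returns 8 + lg_table[3] = 8 + 1 = 9,
+ * which is floor(log2(1000)).
+ */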
+
+#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
+
+#if SS_BLOCKSIZE != 0
+
+static const int sqq_table[256] = {
+  0,  16,  22,  27,  32,  35,  39,  42,  45,  48,  50,  53,  55,  57,  59,  61,
+ 64,  65,  67,  69,  71,  73,  75,  76,  78,  80,  81,  83,  84,  86,  87,  89,
+ 90,  91,  93,  94,  96,  97,  98,  99, 101, 102, 103, 104, 106, 107, 108, 109,
+110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
+128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
+143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
+156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
+169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
+181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
+192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
+202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
+212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
+221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
+230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
+239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
+247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
+};
+
+static INLINE
+int
+ss_isqrt(int x) {
+  int y, e;
+
+  if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
+  e = (x & 0xffff0000) ?
+        ((x & 0xff000000) ?
+          24 + lg_table[(x >> 24) & 0xff] :
+          16 + lg_table[(x >> 16) & 0xff]) :
+        ((x & 0x0000ff00) ?
+           8 + lg_table[(x >>  8) & 0xff] :
+           0 + lg_table[(x >>  0) & 0xff]);
+
+  if(e >= 16) {
+    y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
+    if(e >= 24) { y = (y + 1 + x / y) >> 1; }
+    y = (y + 1 + x / y) >> 1;
+  } else if(e >= 8) {
+    y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
+  } else {
+    return sqq_table[x] >> 4;
+  }
+
+  return (x < (y * y)) ? y - 1 : y;
+}
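+
+/*
+ * Worked example (editorial): ss_isqrt(1000). The exponent estimate is
+ * e = 8 + lg_table[3] = 9, so the e >= 8 branch computes
+ * y = (sqq_table[1000 >> 2] >> 3) + 1 = (252 >> 3) + 1 = 32; since
+ * 1000 < 32 * 32, the function returns y - 1 = 31 = floor(sqrt(1000)).
+ */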
+
+#endif /* SS_BLOCKSIZE != 0 */
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Compares two suffixes. */
+static INLINE
+int
+ss_compare(const unsigned char *T,
+           const int *p1, const int *p2,
+           int depth) {
+  const unsigned char *U1, *U2, *U1n, *U2n;
+
+  for(U1 = T + depth + *p1,
+      U2 = T + depth + *p2,
+      U1n = T + *(p1 + 1) + 2,
+      U2n = T + *(p2 + 1) + 2;
+      (U1 < U1n) && (U2 < U2n) && (*U1 == *U2);
+      ++U1, ++U2) {
+  }
+
+  return U1 < U1n ?
+        (U2 < U2n ? *U1 - *U2 : 1) :
+        (U2 < U2n ? -1 : 0);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
+
+/* Insertionsort for small size groups */
+static
+void
+ss_insertionsort(const unsigned char *T, const int *PA,
+                 int *first, int *last, int depth) {
+  int *i, *j;
+  int t;
+  int r;
+
+  for(i = last - 2; first <= i; --i) {
+    for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
+      do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
+      if(last <= j) { break; }
+    }
+    if(r == 0) { *j = ~*j; }
+    *(j - 1) = t;
+  }
+}
+
+#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
+
+static INLINE
+void
+ss_fixdown(const unsigned char *Td, const int *PA,
+           int *SA, int i, int size) {
+  int j, k;
+  int v;
+  int c, d, e;
+
+  for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
+    d = Td[PA[SA[k = j++]]];
+    if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }
+    if(d <= c) { break; }
+  }
+  SA[i] = v;
+}
+
+/* Simple top-down heapsort. */
+static
+void
+ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
+  int i, m;
+  int t;
+
+  m = size;
+  if((size % 2) == 0) {
+    m--;
+    if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
+  }
+
+  for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
+  if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
+  for(i = m - 1; 0 < i; --i) {
+    t = SA[0], SA[0] = SA[i];
+    ss_fixdown(Td, PA, SA, 0, i);
+    SA[i] = t;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Returns the median of three elements. */
+static INLINE
+int *
+ss_median3(const unsigned char *Td, const int *PA,
+           int *v1, int *v2, int *v3) {
+  int *t;
+  if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }
+  if(Td[PA[*v2]] > Td[PA[*v3]]) {
+    if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }
+    else { return v3; }
+  }
+  return v2;
+}
+
+/* Returns the median of five elements. */
+static INLINE
+int *
+ss_median5(const unsigned char *Td, const int *PA,
+           int *v1, int *v2, int *v3, int *v4, int *v5) {
+  int *t;
+  if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
+  if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
+  if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
+  if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
+  if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
+  if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
+  return v3;
+}
+
+/* Returns the pivot element. */
+static INLINE
+int *
+ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
+  int *middle;
+  int t;
+
+  t = last - first;
+  middle = first + t / 2;
+
+  if(t <= 512) {
+    if(t <= 32) {
+      return ss_median3(Td, PA, first, middle, last - 1);
+    } else {
+      t >>= 2;
+      return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
+    }
+  }
+  t >>= 3;
+  first  = ss_median3(Td, PA, first, first + t, first + (t << 1));
+  middle = ss_median3(Td, PA, middle - t, middle, middle + t);
+  last   = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
+  return ss_median3(Td, PA, first, middle, last);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Binary partition for substrings. */
+static INLINE
+int *
+ss_partition(const int *PA,
+                    int *first, int *last, int depth) {
+  int *a, *b;
+  int t;
+  for(a = first - 1, b = last;;) {
+    for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
+    for(; (a < --b) && ((PA[*b] + depth) <  (PA[*b + 1] + 1));) { }
+    if(b <= a) { break; }
+    t = ~*b;
+    *b = *a;
+    *a = t;
+  }
+  if(first < a) { *first = ~*first; }
+  return a;
+}
+
+/* Multikey introsort for medium size groups. */
+static
+void
+ss_mintrosort(const unsigned char *T, const int *PA,
+              int *first, int *last,
+              int depth) {
+#define STACK_SIZE SS_MISORT_STACKSIZE
+  struct { int *a, *b, c; int d; } stack[STACK_SIZE];
+  const unsigned char *Td;
+  int *a, *b, *c, *d, *e, *f;
+  int s, t;
+  int ssize;
+  int limit;
+  int v, x = 0;
+
+  for(ssize = 0, limit = ss_ilg(last - first);;) {
+
+    if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
+#if 1 < SS_INSERTIONSORT_THRESHOLD
+      if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
+#endif
+      STACK_POP(first, last, depth, limit);
+      continue;
+    }
+
+    Td = T + depth;
+    if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
+    if(limit < 0) {
+      for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
+        if((x = Td[PA[*a]]) != v) {
+          if(1 < (a - first)) { break; }
+          v = x;
+          first = a;
+        }
+      }
+      if(Td[PA[*first] - 1] < v) {
+        first = ss_partition(PA, first, a, depth);
+      }
+      if((a - first) <= (last - a)) {
+        if(1 < (a - first)) {
+          STACK_PUSH(a, last, depth, -1);
+          last = a, depth += 1, limit = ss_ilg(a - first);
+        } else {
+          first = a, limit = -1;
+        }
+      } else {
+        if(1 < (last - a)) {
+          STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
+          first = a, limit = -1;
+        } else {
+          last = a, depth += 1, limit = ss_ilg(a - first);
+        }
+      }
+      continue;
+    }
+
+    /* choose pivot */
+    a = ss_pivot(Td, PA, first, last);
+    v = Td[PA[*a]];
+    SWAP(*first, *a);
+
+    /* partition */
+    for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
+    if(((a = b) < last) && (x < v)) {
+      for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
+        if(x == v) { SWAP(*b, *a); ++a; }
+      }
+    }
+    for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
+    if((b < (d = c)) && (x > v)) {
+      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
+        if(x == v) { SWAP(*c, *d); --d; }
+      }
+    }
+    for(; b < c;) {
+      SWAP(*b, *c);
+      for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
+        if(x == v) { SWAP(*b, *a); ++a; }
+      }
+      for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
+        if(x == v) { SWAP(*c, *d); --d; }
+      }
+    }
+
+    if(a <= d) {
+      c = b - 1;
+
+      if((s = a - first) > (t = b - a)) { s = t; }
+      for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+      if((s = d - c) > (t = last - d - 1)) { s = t; }
+      for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+
+      a = first + (b - a), c = last - (d - c);
+      b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
+
+      if((a - first) <= (last - c)) {
+        if((last - c) <= (c - b)) {
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          STACK_PUSH(c, last, depth, limit);
+          last = a;
+        } else if((a - first) <= (c - b)) {
+          STACK_PUSH(c, last, depth, limit);
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          last = a;
+        } else {
+          STACK_PUSH(c, last, depth, limit);
+          STACK_PUSH(first, a, depth, limit);
+          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
+        }
+      } else {
+        if((a - first) <= (c - b)) {
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          STACK_PUSH(first, a, depth, limit);
+          first = c;
+        } else if((last - c) <= (c - b)) {
+          STACK_PUSH(first, a, depth, limit);
+          STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
+          first = c;
+        } else {
+          STACK_PUSH(first, a, depth, limit);
+          STACK_PUSH(c, last, depth, limit);
+          first = b, last = c, depth += 1, limit = ss_ilg(c - b);
+        }
+      }
+    } else {
+      limit += 1;
+      if(Td[PA[*first] - 1] < v) {
+        first = ss_partition(PA, first, last, depth);
+        limit = ss_ilg(last - first);
+      }
+      depth += 1;
+    }
+  }
+#undef STACK_SIZE
+}
+
+#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
+
+
+/*---------------------------------------------------------------------------*/
+
+#if SS_BLOCKSIZE != 0
+
+static INLINE
+void
+ss_blockswap(int *a, int *b, int n) {
+  int t;
+  for(; 0 < n; --n, ++a, ++b) {
+    t = *a, *a = *b, *b = t;
+  }
+}
+
+static INLINE
+void
+ss_rotate(int *first, int *middle, int *last) {
+  int *a, *b, t;
+  int l, r;
+  l = middle - first, r = last - middle;
+  for(; (0 < l) && (0 < r);) {
+    if(l == r) { ss_blockswap(first, middle, l); break; }
+    if(l < r) {
+      a = last - 1, b = middle - 1;
+      t = *a;
+      do {
+        *a-- = *b, *b-- = *a;
+        if(b < first) {
+          *a = t;
+          last = a;
+          if((r -= l + 1) <= l) { break; }
+          a -= 1, b = middle - 1;
+          t = *a;
+        }
+      } while(1);
+    } else {
+      a = first, b = middle;
+      t = *a;
+      do {
+        *a++ = *b, *b++ = *a;
+        if(last <= b) {
+          *a = t;
+          first = a + 1;
+          if((l -= r + 1) <= r) { break; }
+          a += 1, b = middle;
+          t = *a;
+        }
+      } while(1);
+    }
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static
+void
+ss_inplacemerge(const unsigned char *T, const int *PA,
+                int *first, int *middle, int *last,
+                int depth) {
+  const int *p;
+  int *a, *b;
+  int len, half;
+  int q, r;
+  int x;
+
+  for(;;) {
+    if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
+    else                { x = 0; p = PA +  *(last - 1); }
+    for(a = first, len = middle - first, half = len >> 1, r = -1;
+        0 < len;
+        len = half, half >>= 1) {
+      b = a + half;
+      q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
+      if(q < 0) {
+        a = b + 1;
+        half -= (len & 1) ^ 1;
+      } else {
+        r = q;
+      }
+    }
+    if(a < middle) {
+      if(r == 0) { *a = ~*a; }
+      ss_rotate(a, middle, last);
+      last -= middle - a;
+      middle = a;
+      if(first == middle) { break; }
+    }
+    --last;
+    if(x != 0) { while(*--last < 0) { } }
+    if(middle == last) { break; }
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Merge-forward with internal buffer. */
+static
+void
+ss_mergeforward(const unsigned char *T, const int *PA,
+                int *first, int *middle, int *last,
+                int *buf, int depth) {
+  int *a, *b, *c, *bufend;
+  int t;
+  int r;
+
+  bufend = buf + (middle - first) - 1;
+  ss_blockswap(buf, first, middle - first);
+
+  for(t = *(a = first), b = buf, c = middle;;) {
+    r = ss_compare(T, PA + *b, PA + *c, depth);
+    if(r < 0) {
+      do {
+        *a++ = *b;
+        if(bufend <= b) { *bufend = t; return; }
+        *b++ = *a;
+      } while(*b < 0);
+    } else if(r > 0) {
+      do {
+        *a++ = *c, *c++ = *a;
+        if(last <= c) {
+          while(b < bufend) { *a++ = *b, *b++ = *a; }
+          *a = *b, *b = t;
+          return;
+        }
+      } while(*c < 0);
+    } else {
+      *c = ~*c;
+      do {
+        *a++ = *b;
+        if(bufend <= b) { *bufend = t; return; }
+        *b++ = *a;
+      } while(*b < 0);
+
+      do {
+        *a++ = *c, *c++ = *a;
+        if(last <= c) {
+          while(b < bufend) { *a++ = *b, *b++ = *a; }
+          *a = *b, *b = t;
+          return;
+        }
+      } while(*c < 0);
+    }
+  }
+}
+
+/* Merge-backward with internal buffer. */
+static
+void
+ss_mergebackward(const unsigned char *T, const int *PA,
+                 int *first, int *middle, int *last,
+                 int *buf, int depth) {
+  const int *p1, *p2;
+  int *a, *b, *c, *bufend;
+  int t;
+  int r;
+  int x;
+
+  bufend = buf + (last - middle) - 1;
+  ss_blockswap(buf, middle, last - middle);
+
+  x = 0;
+  if(*bufend < 0)       { p1 = PA + ~*bufend; x |= 1; }
+  else                  { p1 = PA +  *bufend; }
+  if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
+  else                  { p2 = PA +  *(middle - 1); }
+  for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
+    r = ss_compare(T, p1, p2, depth);
+    if(0 < r) {
+      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
+      *a-- = *b;
+      if(b <= buf) { *buf = t; break; }
+      *b-- = *a;
+      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
+      else       { p1 = PA +  *b; }
+    } else if(r < 0) {
+      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
+      *a-- = *c, *c-- = *a;
+      if(c < first) {
+        while(buf < b) { *a-- = *b, *b-- = *a; }
+        *a = *b, *b = t;
+        break;
+      }
+      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
+      else       { p2 = PA +  *c; }
+    } else {
+      if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
+      *a-- = ~*b;
+      if(b <= buf) { *buf = t; break; }
+      *b-- = *a;
+      if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
+      *a-- = *c, *c-- = *a;
+      if(c < first) {
+        while(buf < b) { *a-- = *b, *b-- = *a; }
+        *a = *b, *b = t;
+        break;
+      }
+      if(*b < 0) { p1 = PA + ~*b; x |= 1; }
+      else       { p1 = PA +  *b; }
+      if(*c < 0) { p2 = PA + ~*c; x |= 2; }
+      else       { p2 = PA +  *c; }
+    }
+  }
+}
+
+/* D&C based merge. */
+static
+void
+ss_swapmerge(const unsigned char *T, const int *PA,
+             int *first, int *middle, int *last,
+             int *buf, int bufsize, int depth) {
+#define STACK_SIZE SS_SMERGE_STACKSIZE
+#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
+#define MERGE_CHECK(a, b, c)\
+  do {\
+    if(((c) & 1) ||\
+       (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
+      *(a) = ~*(a);\
+    }\
+    if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
+      *(b) = ~*(b);\
+    }\
+  } while(0)
+  struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
+  int *l, *r, *lm, *rm;
+  int m, len, half;
+  int ssize;
+  int check, next;
+
+  for(check = 0, ssize = 0;;) {
+    if((last - middle) <= bufsize) {
+      if((first < middle) && (middle < last)) {
+        ss_mergebackward(T, PA, first, middle, last, buf, depth);
+      }
+      MERGE_CHECK(first, last, check);
+      STACK_POP(first, middle, last, check);
+      continue;
+    }
+
+    if((middle - first) <= bufsize) {
+      if(first < middle) {
+        ss_mergeforward(T, PA, first, middle, last, buf, depth);
+      }
+      MERGE_CHECK(first, last, check);
+      STACK_POP(first, middle, last, check);
+      continue;
+    }
+
+    for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
+        0 < len;
+        len = half, half >>= 1) {
+      if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
+                       PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
+        m += half + 1;
+        half -= (len & 1) ^ 1;
+      }
+    }
+
+    if(0 < m) {
+      lm = middle - m, rm = middle + m;
+      ss_blockswap(lm, middle, m);
+      l = r = middle, next = 0;
+      if(rm < last) {
+        if(*rm < 0) {
+          *rm = ~*rm;
+          if(first < lm) { for(; *--l < 0;) { } next |= 4; }
+          next |= 1;
+        } else if(first < lm) {
+          for(; *r < 0; ++r) { }
+          next |= 2;
+        }
+      }
+
+      if((l - first) <= (last - r)) {
+        STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
+        middle = lm, last = l, check = (check & 3) | (next & 4);
+      } else {
+        if((next & 2) && (r == middle)) { next ^= 6; }
+        STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
+        first = r, middle = rm, check = (next & 3) | (check & 4);
+      }
+    } else {
+      if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
+        *middle = ~*middle;
+      }
+      MERGE_CHECK(first, last, check);
+      STACK_POP(first, middle, last, check);
+    }
+  }
+#undef STACK_SIZE
+}
+
+#endif /* SS_BLOCKSIZE != 0 */
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Substring sort */
+static
+void
+sssort(const unsigned char *T, const int *PA,
+       int *first, int *last,
+       int *buf, int bufsize,
+       int depth, int n, int lastsuffix) {
+  int *a;
+#if SS_BLOCKSIZE != 0
+  int *b, *middle, *curbuf;
+  int j, k, curbufsize, limit;
+#endif
+  int i;
+
+  if(lastsuffix != 0) { ++first; }
+
+#if SS_BLOCKSIZE == 0
+  ss_mintrosort(T, PA, first, last, depth);
+#else
+  if((bufsize < SS_BLOCKSIZE) &&
+      (bufsize < (last - first)) &&
+      (bufsize < (limit = ss_isqrt(last - first)))) {
+    if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
+    buf = middle = last - limit, bufsize = limit;
+  } else {
+    middle = last, limit = 0;
+  }
+  for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+    ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
+#elif 1 < SS_BLOCKSIZE
+    ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
+#endif
+    curbufsize = last - (a + SS_BLOCKSIZE);
+    curbuf = a + SS_BLOCKSIZE;
+    if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
+    for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
+      ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
+    }
+  }
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+  ss_mintrosort(T, PA, a, middle, depth);
+#elif 1 < SS_BLOCKSIZE
+  ss_insertionsort(T, PA, a, middle, depth);
+#endif
+  for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
+    if(i & 1) {
+      ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
+      a -= k;
+    }
+  }
+  if(limit != 0) {
+#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
+    ss_mintrosort(T, PA, middle, last, depth);
+#elif 1 < SS_BLOCKSIZE
+    ss_insertionsort(T, PA, middle, last, depth);
+#endif
+    ss_inplacemerge(T, PA, first, middle, last, depth);
+  }
+#endif
+
+  if(lastsuffix != 0) {
+    /* Insert last type B* suffix. */
+    int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
+    for(a = first, i = *(first - 1);
+        (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
+        ++a) {
+      *(a - 1) = *a;
+    }
+    *(a - 1) = i;
+  }
+}
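+
+/*
+ * Note (editorial): when SS_BLOCKSIZE != 0, sssort() above behaves like a
+ * bottom-up mergesort over SS_BLOCKSIZE-sized chunks. The chunk index i
+ * acts as a binary counter: after each chunk is sorted, every trailing one
+ * bit of i triggers an ss_swapmerge() that doubles the merged run
+ * (k <<= 1), so at most O(log(n)) pending runs exist at any time and the
+ * scratch buffer can stay small.
+ */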
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+int
+tr_ilg(int n) {
+  return (n & 0xffff0000) ?
+          ((n & 0xff000000) ?
+            24 + lg_table[(n >> 24) & 0xff] :
+            16 + lg_table[(n >> 16) & 0xff]) :
+          ((n & 0x0000ff00) ?
+             8 + lg_table[(n >>  8) & 0xff] :
+             0 + lg_table[(n >>  0) & 0xff]);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Simple insertionsort for small size groups. */
+static
+void
+tr_insertionsort(const int *ISAd, int *first, int *last) {
+  int *a, *b;
+  int t, r;
+
+  for(a = first + 1; a < last; ++a) {
+    for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
+      do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));
+      if(b < first) { break; }
+    }
+    if(r == 0) { *b = ~*b; }
+    *(b + 1) = t;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+void
+tr_fixdown(const int *ISAd, int *SA, int i, int size) {
+  int j, k;
+  int v;
+  int c, d, e;
+
+  for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
+    d = ISAd[SA[k = j++]];
+    if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
+    if(d <= c) { break; }
+  }
+  SA[i] = v;
+}
+
+/* Simple top-down heapsort. */
+static
+void
+tr_heapsort(const int *ISAd, int *SA, int size) {
+  int i, m;
+  int t;
+
+  m = size;
+  if((size % 2) == 0) {
+    m--;
+    if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
+  }
+
+  for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
+  if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
+  for(i = m - 1; 0 < i; --i) {
+    t = SA[0], SA[0] = SA[i];
+    tr_fixdown(ISAd, SA, 0, i);
+    SA[i] = t;
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Returns the median of three elements. */
+static INLINE
+int *
+tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
+  int *t;
+  if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
+  if(ISAd[*v2] > ISAd[*v3]) {
+    if(ISAd[*v1] > ISAd[*v3]) { return v1; }
+    else { return v3; }
+  }
+  return v2;
+}
+
+/* Returns the median of five elements. */
+static INLINE
+int *
+tr_median5(const int *ISAd,
+           int *v1, int *v2, int *v3, int *v4, int *v5) {
+  int *t;
+  if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
+  if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
+  if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
+  if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
+  if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
+  if(ISAd[*v3] > ISAd[*v4]) { return v4; }
+  return v3;
+}
+
+/* Returns the pivot element. */
+static INLINE
+int *
+tr_pivot(const int *ISAd, int *first, int *last) {
+  int *middle;
+  int t;
+
+  t = last - first;
+  middle = first + t / 2;
+
+  if(t <= 512) {
+    if(t <= 32) {
+      return tr_median3(ISAd, first, middle, last - 1);
+    } else {
+      t >>= 2;
+      return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
+    }
+  }
+  t >>= 3;
+  first  = tr_median3(ISAd, first, first + t, first + (t << 1));
+  middle = tr_median3(ISAd, middle - t, middle, middle + t);
+  last   = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
+  return tr_median3(ISAd, first, middle, last);
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+typedef struct _trbudget_t trbudget_t;
+struct _trbudget_t {
+  int chance;
+  int remain;
+  int incval;
+  int count;
+};
+
+static INLINE
+void
+trbudget_init(trbudget_t *budget, int chance, int incval) {
+  budget->chance = chance;
+  budget->remain = budget->incval = incval;
+}
+
+static INLINE
+int
+trbudget_check(trbudget_t *budget, int size) {
+  if(size <= budget->remain) { budget->remain -= size; return 1; }
+  if(budget->chance == 0) { budget->count += size; return 0; }
+  budget->remain += budget->incval - size;
+  budget->chance -= 1;
+  return 1;
+}
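+
+/*
+ * Sketch of the budget mechanism (editorial): trbudget_check() grants
+ * `size` units from `remain`, refilling `remain` by `incval` up to
+ * `chance` times. Once the chances are exhausted it starts refusing work
+ * and accumulates the refused size in `count`; trsort() below resets
+ * `count` per group and treats a nonzero value as "still unsorted",
+ * scheduling another pass at doubled depth.
+ */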
+
+
+/*---------------------------------------------------------------------------*/
+
+static INLINE
+void
+tr_partition(const int *ISAd,
+             int *first, int *middle, int *last,
+             int **pa, int **pb, int v) {
+  int *a, *b, *c, *d, *e, *f;
+  int t, s;
+  int x = 0;
+
+  for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
+  if(((a = b) < last) && (x < v)) {
+    for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
+      if(x == v) { SWAP(*b, *a); ++a; }
+    }
+  }
+  for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
+  if((b < (d = c)) && (x > v)) {
+    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
+      if(x == v) { SWAP(*c, *d); --d; }
+    }
+  }
+  for(; b < c;) {
+    SWAP(*b, *c);
+    for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
+      if(x == v) { SWAP(*b, *a); ++a; }
+    }
+    for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
+      if(x == v) { SWAP(*c, *d); --d; }
+    }
+  }
+
+  if(a <= d) {
+    c = b - 1;
+    if((s = a - first) > (t = b - a)) { s = t; }
+    for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+    if((s = d - c) > (t = last - d - 1)) { s = t; }
+    for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
+    first += (b - a), last -= (d - c);
+  }
+  *pa = first, *pb = last;
+}
+
+static
+void
+tr_copy(int *ISA, const int *SA,
+        int *first, int *a, int *b, int *last,
+        int depth) {
+  /* Sort the suffixes of the middle partition
+     using the sorted order of the suffixes of the left and right partitions. */
+  int *c, *d, *e;
+  int s, v;
+
+  v = b - SA - 1;
+  for(c = first, d = a - 1; c <= d; ++c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *++d = s;
+      ISA[s] = d - SA;
+    }
+  }
+  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *--d = s;
+      ISA[s] = d - SA;
+    }
+  }
+}
+
+static
+void
+tr_partialcopy(int *ISA, const int *SA,
+               int *first, int *a, int *b, int *last,
+               int depth) {
+  int *c, *d, *e;
+  int s, v;
+  int rank, lastrank, newrank = -1;
+
+  v = b - SA - 1;
+  lastrank = -1;
+  for(c = first, d = a - 1; c <= d; ++c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *++d = s;
+      rank = ISA[s + depth];
+      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
+      ISA[s] = newrank;
+    }
+  }
+
+  lastrank = -1;
+  for(e = d; first <= e; --e) {
+    rank = ISA[*e];
+    if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
+    if(newrank != rank) { ISA[*e] = newrank; }
+  }
+
+  lastrank = -1;
+  for(c = last - 1, e = d + 1, d = b; e < d; --c) {
+    if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
+      *--d = s;
+      rank = ISA[s + depth];
+      if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
+      ISA[s] = newrank;
+    }
+  }
+}
+
+static
+void
+tr_introsort(int *ISA, const int *ISAd,
+             int *SA, int *first, int *last,
+             trbudget_t *budget) {
+#define STACK_SIZE TR_STACKSIZE
+  struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];
+  int *a, *b, *c;
+  int t;
+  int v, x = 0;
+  int incr = ISAd - ISA;
+  int limit, next;
+  int ssize, trlink = -1;
+
+  for(ssize = 0, limit = tr_ilg(last - first);;) {
+
+    if(limit < 0) {
+      if(limit == -1) {
+        /* tandem repeat partition */
+        tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
+
+        /* update ranks */
+        if(a < last) {
+          for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
+        }
+        if(b < last) {
+          for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
+        }
+
+        /* push */
+        if(1 < (b - a)) {
+          STACK_PUSH5(NULL, a, b, 0, 0);
+          STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
+          trlink = ssize - 2;
+        }
+        if((a - first) <= (last - b)) {
+          if(1 < (a - first)) {
+            STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
+            last = a, limit = tr_ilg(a - first);
+          } else if(1 < (last - b)) {
+            first = b, limit = tr_ilg(last - b);
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        } else {
+          if(1 < (last - b)) {
+            STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
+            first = b, limit = tr_ilg(last - b);
+          } else if(1 < (a - first)) {
+            last = a, limit = tr_ilg(a - first);
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        }
+      } else if(limit == -2) {
+        /* tandem repeat copy */
+        a = stack[--ssize].b, b = stack[ssize].c;
+        if(stack[ssize].d == 0) {
+          tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
+        } else {
+          if(0 <= trlink) { stack[trlink].d = -1; }
+          tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
+        }
+        STACK_POP5(ISAd, first, last, limit, trlink);
+      } else {
+        /* sorted partition */
+        if(0 <= *first) {
+          a = first;
+          do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
+          first = a;
+        }
+        if(first < last) {
+          a = first; do { *a = ~*a; } while(*++a < 0);
+          next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
+          if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
+
+          /* push */
+          if(trbudget_check(budget, a - first)) {
+            if((a - first) <= (last - a)) {
+              STACK_PUSH5(ISAd, a, last, -3, trlink);
+              ISAd += incr, last = a, limit = next;
+            } else {
+              if(1 < (last - a)) {
+                STACK_PUSH5(ISAd + incr, first, a, next, trlink);
+                first = a, limit = -3;
+              } else {
+                ISAd += incr, last = a, limit = next;
+              }
+            }
+          } else {
+            if(0 <= trlink) { stack[trlink].d = -1; }
+            if(1 < (last - a)) {
+              first = a, limit = -3;
+            } else {
+              STACK_POP5(ISAd, first, last, limit, trlink);
+            }
+          }
+        } else {
+          STACK_POP5(ISAd, first, last, limit, trlink);
+        }
+      }
+      continue;
+    }
+
+    if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
+      tr_insertionsort(ISAd, first, last);
+      limit = -3;
+      continue;
+    }
+
+    if(limit-- == 0) {
+      tr_heapsort(ISAd, first, last - first);
+      for(a = last - 1; first < a; a = b) {
+        for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
+      }
+      limit = -3;
+      continue;
+    }
+
+    /* choose pivot */
+    a = tr_pivot(ISAd, first, last);
+    SWAP(*first, *a);
+    v = ISAd[*first];
+
+    /* partition */
+    tr_partition(ISAd, first, first + 1, last, &a, &b, v);
+    if((last - first) != (b - a)) {
+      next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
+
+      /* update ranks */
+      for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
+      if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
+
+      /* push */
+      if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
+        if((a - first) <= (last - b)) {
+          if((last - b) <= (b - a)) {
+            if(1 < (a - first)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              STACK_PUSH5(ISAd, b, last, limit, trlink);
+              last = a;
+            } else if(1 < (last - b)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              first = b;
+            } else {
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else if((a - first) <= (b - a)) {
+            if(1 < (a - first)) {
+              STACK_PUSH5(ISAd, b, last, limit, trlink);
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              last = a;
+            } else {
+              STACK_PUSH5(ISAd, b, last, limit, trlink);
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else {
+            STACK_PUSH5(ISAd, b, last, limit, trlink);
+            STACK_PUSH5(ISAd, first, a, limit, trlink);
+            ISAd += incr, first = a, last = b, limit = next;
+          }
+        } else {
+          if((a - first) <= (b - a)) {
+            if(1 < (last - b)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              STACK_PUSH5(ISAd, first, a, limit, trlink);
+              first = b;
+            } else if(1 < (a - first)) {
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              last = a;
+            } else {
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else if((last - b) <= (b - a)) {
+            if(1 < (last - b)) {
+              STACK_PUSH5(ISAd, first, a, limit, trlink);
+              STACK_PUSH5(ISAd + incr, a, b, next, trlink);
+              first = b;
+            } else {
+              STACK_PUSH5(ISAd, first, a, limit, trlink);
+              ISAd += incr, first = a, last = b, limit = next;
+            }
+          } else {
+            STACK_PUSH5(ISAd, first, a, limit, trlink);
+            STACK_PUSH5(ISAd, b, last, limit, trlink);
+            ISAd += incr, first = a, last = b, limit = next;
+          }
+        }
+      } else {
+        if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
+        if((a - first) <= (last - b)) {
+          if(1 < (a - first)) {
+            STACK_PUSH5(ISAd, b, last, limit, trlink);
+            last = a;
+          } else if(1 < (last - b)) {
+            first = b;
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        } else {
+          if(1 < (last - b)) {
+            STACK_PUSH5(ISAd, first, a, limit, trlink);
+            first = b;
+          } else if(1 < (a - first)) {
+            last = a;
+          } else {
+            STACK_POP5(ISAd, first, last, limit, trlink);
+          }
+        }
+      }
+    } else {
+      if(trbudget_check(budget, last - first)) {
+        limit = tr_ilg(last - first), ISAd += incr;
+      } else {
+        if(0 <= trlink) { stack[trlink].d = -1; }
+        STACK_POP5(ISAd, first, last, limit, trlink);
+      }
+    }
+  }
+#undef STACK_SIZE
+}
+
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Tandem repeat sort */
+static
+void
+trsort(int *ISA, int *SA, int n, int depth) {
+  int *ISAd;
+  int *first, *last;
+  trbudget_t budget;
+  int t, skip, unsorted;
+
+  trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
+/*  trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
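+  /* Prefix doubling (added comment) : each pass compares suffixes at offset
+     ISAd - ISA, and ISAd += ISAd - ISA doubles that offset, so passes repeat
+     until the whole array collapses into one sorted group (*SA == -n). */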
+  for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
+    first = SA;
+    skip = 0;
+    unsorted = 0;
+    do {
+      if((t = *first) < 0) { first -= t; skip += t; }
+      else {
+        if(skip != 0) { *(first + skip) = skip; skip = 0; }
+        last = SA + ISA[t] + 1;
+        if(1 < (last - first)) {
+          budget.count = 0;
+          tr_introsort(ISA, ISAd, SA, first, last, &budget);
+          if(budget.count != 0) { unsorted += budget.count; }
+          else { skip = first - last; }
+        } else if((last - first) == 1) {
+          skip = -1;
+        }
+        first = last;
+      }
+    } while(first < (SA + n));
+    if(skip != 0) { *(first + skip) = skip; }
+    if(unsorted == 0) { break; }
+  }
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/* Sorts suffixes of type B*. */
+static
+int
+sort_typeBstar(const unsigned char *T, int *SA,
+               int *bucket_A, int *bucket_B,
+               int n, int openMP) {
+  int *PAb, *ISAb, *buf;
+#ifdef LIBBSC_OPENMP
+  int *curbuf;
+  int l;
+#endif
+  int i, j, k, t, m, bufsize;
+  int c0, c1;
+#ifdef LIBBSC_OPENMP
+  int d0, d1;
+#endif
+  (void)openMP;
+
+  /* Initialize bucket arrays. */
+  for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
+  for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
+
+  /* Count the number of occurrences of the first one or two characters of each
+     type A, B and B* suffix. Moreover, store the beginning position of all
+     type B* suffixes into the array SA. */
+  for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
+    /* type A suffix. */
+    do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
+    if(0 <= i) {
+      /* type B* suffix. */
+      ++BUCKET_BSTAR(c0, c1);
+      SA[--m] = i;
+      /* type B suffix. */
+      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
+        ++BUCKET_B(c0, c1);
+      }
+    }
+  }
+  m = n - m;
+/*
+note:
+  A type B* suffix is lexicographically smaller than a type B suffix that
+  begins with the same first two characters.
+*/
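+/*
+  illustrative example (added comment, not from the original source) : with
+  first two characters "ab", a B* suffix continues with a character <= 'b'
+  (its successor suffix is type A), while a plain type B suffix continues
+  with a character >= 'b', so e.g. "aba..." (B*) sorts before "abc..." (B).
+*/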
+
+  /* Calculate the index of start/end point of each bucket. */
+  for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
+    t = i + BUCKET_A(c0);
+    BUCKET_A(c0) = i + j; /* start point */
+    i = t + BUCKET_B(c0, c0);
+    for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
+      j += BUCKET_BSTAR(c0, c1);
+      BUCKET_BSTAR(c0, c1) = j; /* end point */
+      i += BUCKET_B(c0, c1);
+    }
+  }
+
+  if(0 < m) {
+    /* Sort the type B* suffixes by their first two characters. */
+    PAb = SA + n - m; ISAb = SA + m;
+    for(i = m - 2; 0 <= i; --i) {
+      t = PAb[i], c0 = T[t], c1 = T[t + 1];
+      SA[--BUCKET_BSTAR(c0, c1)] = i;
+    }
+    t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
+    SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
+
+    /* Sort the type B* substrings using sssort. */
+#ifdef LIBBSC_OPENMP
+    if (openMP)
+    {
+        buf = SA + m;
+        c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
+#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
+        {
+          bufsize = (n - (2 * m)) / omp_get_num_threads();
+          curbuf = buf + omp_get_thread_num() * bufsize;
+          k = 0;
+          for(;;) {
+            #pragma omp critical(sssort_lock)
+            {
+              if(0 < (l = j)) {
+                d0 = c0, d1 = c1;
+                do {
+                  k = BUCKET_BSTAR(d0, d1);
+                  if(--d1 <= d0) {
+                    d1 = ALPHABET_SIZE - 1;
+                    if(--d0 < 0) { break; }
+                  }
+                } while(((l - k) <= 1) && (0 < (l = k)));
+                c0 = d0, c1 = d1, j = k;
+              }
+            }
+            if(l == 0) { break; }
+            sssort(T, PAb, SA + k, SA + l,
+                   curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
+          }
+        }
+    }
+    else
+    {
+        buf = SA + m, bufsize = n - (2 * m);
+        for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+          for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+            i = BUCKET_BSTAR(c0, c1);
+            if(1 < (j - i)) {
+              sssort(T, PAb, SA + i, SA + j,
+                     buf, bufsize, 2, n, *(SA + i) == (m - 1));
+            }
+          }
+        }
+    }
+#else
+    buf = SA + m, bufsize = n - (2 * m);
+    for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
+      for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
+        i = BUCKET_BSTAR(c0, c1);
+        if(1 < (j - i)) {
+          sssort(T, PAb, SA + i, SA + j,
+                 buf, bufsize, 2, n, *(SA + i) == (m - 1));
+        }
+      }
+    }
+#endif
+
+    /* Compute ranks of type B* substrings. */
+    for(i = m - 1; 0 <= i; --i) {
+      if(0 <= SA[i]) {
+        j = i;
+        do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
+        SA[i + 1] = i - j;
+        if(i <= 0) { break; }
+      }
+      j = i;
+      do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
+      ISAb[SA[i]] = j;
+    }
+
+    /* Construct the inverse suffix array of type B* suffixes using trsort. */
+    trsort(ISAb, SA, m, 1);
+
+    /* Set the sorted order of type B* suffixes. */
+    for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
+      for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
+      if(0 <= i) {
+        t = i;
+        for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
+        SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
+      }
+    }
+
+    /* Calculate the index of start/end point of each bucket. */
+    BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
+    for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
+      i = BUCKET_A(c0 + 1) - 1;
+      for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
+        t = i - BUCKET_B(c0, c1);
+        BUCKET_B(c0, c1) = i; /* end point */
+
+        /* Move all type B* suffixes to the correct position. */
+        for(i = t, j = BUCKET_BSTAR(c0, c1);
+            j <= k;
+            --i, --k) { SA[i] = SA[k]; }
+      }
+      BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
+      BUCKET_B(c0, c0) = i; /* end point */
+    }
+  }
+
+  return m;
+}
+
+/* Constructs the suffix array by using the sorted order of type B* suffixes. */
+static
+void
+construct_SA(const unsigned char *T, int *SA,
+             int *bucket_A, int *bucket_B,
+             int n, int m) {
+  int *i, *j, *k;
+  int s;
+  int c0, c1, c2;
+
+  if(0 < m) {
+    /* Construct the sorted order of type B suffixes by using
+       the sorted order of type B* suffixes. */
+    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+      /* Scan the suffix array from right to left. */
+      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+          i <= j;
+          --j) {
+        if(0 < (s = *j)) {
+          assert(T[s] == c1);
+          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+          assert(T[s - 1] <= T[s]);
+          *j = ~s;
+          c0 = T[--s];
+          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+          if(c0 != c2) {
+            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+            k = SA + BUCKET_B(c2 = c0, c1);
+          }
+          assert(k < j);
+          *k-- = s;
+        } else {
+          assert(((s == 0) && (T[s] == c1)) || (s < 0));
+          *j = ~s;
+        }
+      }
+    }
+  }
+
+  /* Construct the suffix array by using
+     the sorted order of type B suffixes. */
+  k = SA + BUCKET_A(c2 = T[n - 1]);
+  *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
+  /* Scan the suffix array from left to right. */
+  for(i = SA, j = SA + n; i < j; ++i) {
+    if(0 < (s = *i)) {
+      assert(T[s - 1] >= T[s]);
+      c0 = T[--s];
+      if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
+      if(c0 != c2) {
+        BUCKET_A(c2) = k - SA;
+        k = SA + BUCKET_A(c2 = c0);
+      }
+      assert(i < k);
+      *k++ = s;
+    } else {
+      assert(s < 0);
+      *i = ~s;
+    }
+  }
+}
+
+/* Constructs the Burrows-Wheeler transformed string directly
+   by using the sorted order of type B* suffixes. */
+static
+int
+construct_BWT(const unsigned char *T, int *SA,
+              int *bucket_A, int *bucket_B,
+              int n, int m) {
+  int *i, *j, *k, *orig;
+  int s;
+  int c0, c1, c2;
+
+  if(0 < m) {
+    /* Construct the sorted order of type B suffixes by using
+       the sorted order of type B* suffixes. */
+    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+      /* Scan the suffix array from right to left. */
+      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+          i <= j;
+          --j) {
+        if(0 < (s = *j)) {
+          assert(T[s] == c1);
+          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+          assert(T[s - 1] <= T[s]);
+          c0 = T[--s];
+          *j = ~((int)c0);
+          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+          if(c0 != c2) {
+            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+            k = SA + BUCKET_B(c2 = c0, c1);
+          }
+          assert(k < j);
+          *k-- = s;
+        } else if(s != 0) {
+          *j = ~s;
+#ifndef NDEBUG
+        } else {
+          assert(T[s] == c1);
+#endif
+        }
+      }
+    }
+  }
+
+  /* Construct the BWTed string by using
+     the sorted order of type B suffixes. */
+  k = SA + BUCKET_A(c2 = T[n - 1]);
+  *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
+  /* Scan the suffix array from left to right. */
+  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
+    if(0 < (s = *i)) {
+      assert(T[s - 1] >= T[s]);
+      c0 = T[--s];
+      *i = c0;
+      if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
+      if(c0 != c2) {
+        BUCKET_A(c2) = k - SA;
+        k = SA + BUCKET_A(c2 = c0);
+      }
+      assert(i < k);
+      *k++ = s;
+    } else if(s != 0) {
+      *i = ~s;
+    } else {
+      orig = i;
+    }
+  }
+
+  return orig - SA;
+}
+
+/* Constructs the Burrows-Wheeler transformed string directly
+   by using the sorted order of type B* suffixes. */
+static
+int
+construct_BWT_indexes(const unsigned char *T, int *SA,
+                      int *bucket_A, int *bucket_B,
+                      int n, int m,
+                      unsigned char * num_indexes, int * indexes) {
+  int *i, *j, *k, *orig;
+  int s;
+  int c0, c1, c2;
+
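+  /* Added comment : the bit-smearing below rounds n/8 down to a value of the
+     form 2^k - 1, making mod+1 a power of two. Positions s with
+     (s & mod) == 0 are then sampled at a fixed interval of mod+1 when
+     recording secondary indexes. */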
+  int mod = n / 8;
+  {
+      mod |= mod >> 1;  mod |= mod >> 2;
+      mod |= mod >> 4;  mod |= mod >> 8;
+      mod |= mod >> 16; mod >>= 1;
+
+      *num_indexes = (unsigned char)((n - 1) / (mod + 1));
+  }
+
+  if(0 < m) {
+    /* Construct the sorted order of type B suffixes by using
+       the sorted order of type B* suffixes. */
+    for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
+      /* Scan the suffix array from right to left. */
+      for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
+          j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
+          i <= j;
+          --j) {
+        if(0 < (s = *j)) {
+          assert(T[s] == c1);
+          assert(((s + 1) < n) && (T[s] <= T[s + 1]));
+          assert(T[s - 1] <= T[s]);
+
+          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
+
+          c0 = T[--s];
+          *j = ~((int)c0);
+          if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
+          if(c0 != c2) {
+            if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
+            k = SA + BUCKET_B(c2 = c0, c1);
+          }
+          assert(k < j);
+          *k-- = s;
+        } else if(s != 0) {
+          *j = ~s;
+#ifndef NDEBUG
+        } else {
+          assert(T[s] == c1);
+#endif
+        }
+      }
+    }
+  }
+
+  /* Construct the BWTed string by using
+     the sorted order of type B suffixes. */
+  k = SA + BUCKET_A(c2 = T[n - 1]);
+  if (T[n - 2] < c2) {
+    if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
+    *k++ = ~((int)T[n - 2]);
+  }
+  else {
+    *k++ = n - 1;
+  }
+
+  /* Scan the suffix array from left to right. */
+  for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
+    if(0 < (s = *i)) {
+      assert(T[s - 1] >= T[s]);
+
+      if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
+
+      c0 = T[--s];
+      *i = c0;
+      if(c0 != c2) {
+        BUCKET_A(c2) = k - SA;
+        k = SA + BUCKET_A(c2 = c0);
+      }
+      assert(i < k);
+      if((0 < s) && (T[s - 1] < c0)) {
+          if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
+          *k++ = ~((int)T[s - 1]);
+      } else
+        *k++ = s;
+    } else if(s != 0) {
+      *i = ~s;
+    } else {
+      orig = i;
+    }
+  }
+
+  return orig - SA;
+}
+
+
+/*---------------------------------------------------------------------------*/
+
+/*- Function -*/
+
+int
+divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
+  int *bucket_A, *bucket_B;
+  int m;
+  int err = 0;
+
+  /* Check arguments. */
+  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
+  else if(n == 0) { return 0; }
+  else if(n == 1) { SA[0] = 0; return 0; }
+  else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }
+
+  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
+  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
+
+  /* Suffixsort. */
+  if((bucket_A != NULL) && (bucket_B != NULL)) {
+    m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
+    construct_SA(T, SA, bucket_A, bucket_B, n, m);
+  } else {
+    err = -2;
+  }
+
+  free(bucket_B);
+  free(bucket_A);
+
+  return err;
+}
+
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {
+  int *B;
+  int *bucket_A, *bucket_B;
+  int m, pidx, i;
+
+  /* Check arguments. */
+  if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
+  else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
+
+  if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
+  bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
+  bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
+
+  /* Burrows-Wheeler Transform. */
+  if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
+    m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
+
+    if (num_indexes == NULL || indexes == NULL) {
+        pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
+    } else {
+        pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
+    }
+
+    /* Copy to output string. */
+    U[0] = T[n - 1];
+    for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }
+    for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }
+    pidx += 1;
+  } else {
+    pidx = -2;
+  }
+
+  free(bucket_B);
+  free(bucket_A);
+  if(A == NULL) { free(B); }
+
+  return pidx;
+}
diff --git a/vendor/github.com/DataDog/zstd/divsufsort.h b/vendor/github.com/DataDog/zstd/divsufsort.h
new file mode 100644
index 0000000..5440994
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/divsufsort.h
@@ -0,0 +1,67 @@
+/*
+ * divsufsort.h for libdivsufsort-lite
+ * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DIVSUFSORT_H
+#define _DIVSUFSORT_H 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/*- Prototypes -*/
+
+/**
+ * Constructs the suffix array of a given string.
+ * @param T [0..n-1] The input string.
+ * @param SA [0..n-1] The output array of suffixes.
+ * @param n The length of the given string.
+ * @param openMP enables OpenMP optimization.
+ * @return 0 if no error occurred, -1 or -2 otherwise.
+ */
+int
+divsufsort(const unsigned char *T, int *SA, int n, int openMP);
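+
+/*
+ * Usage sketch (illustrative only, not part of the original header) :
+ *
+ *   const unsigned char T[] = "abracadabra";
+ *   int n = 11;                                // length, excluding the NUL
+ *   int* SA = (int*)malloc(n * sizeof(int));
+ *   if((SA != NULL) && (divsufsort(T, SA, n, 0) == 0)) {
+ *     // SA[0..n-1] holds the start positions of the sorted suffixes
+ *   }
+ *   free(SA);
+ */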
+
+/**
+ * Constructs the Burrows-Wheeler transformed string of a given string.
+ * @param T [0..n-1] The input string.
+ * @param U [0..n-1] The output string. (can be T)
+ * @param A [0..n-1] The temporary array. (can be NULL)
+ * @param n The length of the given string.
+ * @param num_indexes The length of secondary indexes array. (can be NULL)
+ * @param indexes The secondary indexes array. (can be NULL)
+ * @param openMP enables OpenMP optimization.
+ * @return The primary index if no error occurred, -1 or -2 otherwise.
+ */
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);
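+
+/*
+ * Usage sketch (illustrative only) : in-place BWT, no secondary indexes.
+ *
+ *   unsigned char buf[] = "abracadabra";
+ *   int pidx = divbwt(buf, buf, NULL, 11, NULL, NULL, 0);
+ *   // on success buf holds the transformed string and pidx the primary
+ *   // index; a negative pidx reports an error
+ */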
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#endif /* _DIVSUFSORT_H */
diff --git a/vendor/github.com/DataDog/zstd/entropy_common.c b/vendor/github.com/DataDog/zstd/entropy_common.c
new file mode 100644
index 0000000..b37a082
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/entropy_common.c
@@ -0,0 +1,221 @@
+/*
+   Common functions of New Generation Entropy library
+   Copyright (C) 2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+/* *************************************
+*  Dependencies
+***************************************/
+#include "mem.h"
+#include "error_private.h"       /* ERR_*, ERROR */
+#define FSE_STATIC_LINKING_ONLY  /* FSE_MIN_TABLELOG */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY  /* HUF_TABLELOG_ABSOLUTEMAX */
+#include "huf.h"
+
+
+/*===   Version   ===*/
+unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
+
+
+/*===   Error Management   ===*/
+unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) & (charnum<=*maxSVPtr)) {
+        if (previous0) {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF) {
+                n0 += 24;
+                if (ip < iend-5) {
+                    ip += 2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                } else {
+                    bitStream >>= 16;
+                    bitCount   += 16;
+            }   }
+            while ((bitStream & 3) == 3) {
+                n0 += 3;
+                bitStream >>= 2;
+                bitCount += 2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            } else {
+                bitStream >>= 2;
+        }   }
+        {   int const max = (2*threshold-1) - remaining;
+            int count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = bitStream & (threshold-1);
+                bitCount += nbBits-1;
+            } else {
+                count = bitStream & (2*threshold-1);
+                if (count >= threshold) count -= max;
+                bitCount += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= count < 0 ? -count : count;   /* -1 means +1 */
+            normalizedCounter[charnum++] = (short)count;
+            previous0 = !count;
+            while (remaining < threshold) {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+    if (remaining != 1) return ERROR(corruption_detected);
+    if (bitCount > 32) return ERROR(corruption_detected);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    return ip-istart;
+}
+
+
+/*! HUF_readStats() :
+    Read compact Huffman tree, saved by HUF_writeCTable().
+    `huffWeight` is destination buffer.
+    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+    @return : size read from `src`, or an error code.
+    Note : Needed by HUF_readCTable() and HUF_readDTableX?().
+*/
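+/* Header layout, as implemented below (added comment) : the first byte either
+   announces a raw list of 4-bit weights (value >= 128, count = value - 127)
+   or gives the size of an FSE-compressed weight stream (value < 128). */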
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    /* memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzers complain ... */
+
+    if (iSize >= 128) {  /* special header */
+        oSize = iSize - 127;
+        iSize = ((oSize+1)/2);
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        if (oSize >= hwSize) return ERROR(corruption_detected);
+        ip += 1;
+        {   U32 n;
+            for (n=0; n<oSize; n+=2) {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }
+    else  {   /* header compressed with FSE (normal case) */
+        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+    weightTotal = 0;
+    {   U32 n; for (n=0; n<oSize; n++) {
+            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+            rankStats[huffWeight[n]]++;
+            weightTotal += (1 << huffWeight[n]) >> 1;
+    }   }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+        *tableLogPtr = tableLog;
+        /* determine last weight */
+        {   U32 const total = 1 << tableLog;
+            U32 const rest = total - weightTotal;
+            U32 const verif = 1 << BIT_highbit32(rest);
+            U32 const lastWeight = BIT_highbit32(rest) + 1;
+            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+            huffWeight[oSize] = (BYTE)lastWeight;
+            rankStats[lastWeight]++;
+    }   }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    return iSize+1;
+}
diff --git a/vendor/github.com/DataDog/zstd/error_private.c b/vendor/github.com/DataDog/zstd/error_private.c
new file mode 100644
index 0000000..d004ee6
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/error_private.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* The purpose of this file is to have a single list of error strings embedded in the binary */
+
+#include "error_private.h"
+
+const char* ERR_getErrorString(ERR_enum code)
+{
+    static const char* const notErrorCode = "Unspecified error code";
+    switch( code )
+    {
+    case PREFIX(no_error): return "No error detected";
+    case PREFIX(GENERIC):  return "Error (generic)";
+    case PREFIX(prefix_unknown): return "Unknown frame descriptor";
+    case PREFIX(version_unsupported): return "Version not supported";
+    case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter";
+    case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding";
+    case PREFIX(corruption_detected): return "Corrupted block detected";
+    case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
+    case PREFIX(parameter_unsupported): return "Unsupported parameter";
+    case PREFIX(parameter_outOfBound): return "Parameter is out of bound";
+    case PREFIX(init_missing): return "Context should be init first";
+    case PREFIX(memory_allocation): return "Allocation error : not enough memory";
+    case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough";
+    case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
+    case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
+    case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large";
+    case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
+    case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
+    case PREFIX(dictionary_wrong): return "Dictionary mismatch";
+    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
+    case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
+    case PREFIX(srcSize_wrong): return "Src size is incorrect";
+        /* following error codes are not stable and may be removed or changed in a future version */
+    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
+    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
+    case PREFIX(maxCode):
+    default: return notErrorCode;
+    }
+}
diff --git a/vendor/github.com/DataDog/zstd/error_private.h b/vendor/github.com/DataDog/zstd/error_private.h
new file mode 100644
index 0000000..0d2fa7e
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/error_private.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>        /* size_t */
+#include "zstd_errors.h"  /* enum list */
+
+
+/* ****************************************
+*  Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+#  define ERR_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+#  define ERR_STATIC static __inline
+#else
+#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-****************************************
+*  Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+*  Error codes handling
+******************************************/
+#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
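+/* Convention (added comment) : an error is returned as (size_t)-code, i.e.
+   one of the topmost values of size_t, so ERR_isError() only needs to
+   compare against ERROR(maxCode). */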
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+
+/*-****************************************
+*  Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+    return ERR_getErrorString(ERR_getErrorCode(code));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/errors.go b/vendor/github.com/DataDog/zstd/errors.go
new file mode 100644
index 0000000..38db0d5
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/errors.go
@@ -0,0 +1,35 @@
+package zstd
+
+/*
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+*/
+import "C"
+
+// ErrorCode is an error returned by the zstd library.
+type ErrorCode int
+
+// Error returns the error string given by zstd
+func (e ErrorCode) Error() string {
+	return C.GoString(C.ZSTD_getErrorName(C.size_t(e)))
+}
+
+func cIsError(code int) bool {
+	return int(C.ZSTD_isError(C.size_t(code))) != 0
+}
+
+// getError returns an error for the return code, or nil if it's not an error
+func getError(code int) error {
+	if code < 0 && cIsError(code) {
+		return ErrorCode(code)
+	}
+	return nil
+}
+
+// IsDstSizeTooSmallError returns whether the error corresponds to the standard zstd dstSize_tooSmall error
+func IsDstSizeTooSmallError(e error) bool {
+	return e != nil && e.Error() == "Destination buffer is too small"
+}
diff --git a/vendor/github.com/DataDog/zstd/fse.h b/vendor/github.com/DataDog/zstd/fse.h
new file mode 100644
index 0000000..6a1d272
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fse.h
@@ -0,0 +1,704 @@
+/* ******************************************************************
+   FSE : Finite State Entropy codec
+   Public Prototypes declaration
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+*  FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
+#  define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+#  define FSE_PUBLIC_API
+#endif
+
+/*------   Version   ------*/
+#define FSE_VERSION_MAJOR    0
+#define FSE_VERSION_MINOR    9
+#define FSE_VERSION_RELEASE  0
+
+#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
+#define FSE_QUOTE(str) #str
+#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
+#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
+
+#define FSE_VERSION_NUMBER  (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
+FSE_PUBLIC_API unsigned FSE_versionNumber(void);   /**< library version number; to be used when checking dll version */
+
+/*-****************************************
+*  FSE simple functions
+******************************************/
+/*! FSE_compress() :
+    Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
+    'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
+    @return : size of compressed data (<= dstCapacity).
+    Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+                     if return == 1, srcData is a single byte value repeated srcSize times. Use RLE compression instead.
+                     if FSE_isError(return), compression failed (more details using FSE_getErrorName())
+*/
+FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
+                             const void* src, size_t srcSize);
+
+/*! FSE_decompress():
+    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstCapacity'.
+    @return : size of regenerated data (<= maxDstSize),
+              or an error code, which can be tested using FSE_isError() .
+
+    ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
+    Why ? : making this distinction requires a header.
+    Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+FSE_PUBLIC_API size_t FSE_decompress(void* dst,  size_t dstCapacity,
+                               const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+*  Tool functions
+******************************************/
+FSE_PUBLIC_API size_t FSE_compressBound(size_t size);       /* maximum compressed size */
+
+/* Error Management */
+FSE_PUBLIC_API unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
+FSE_PUBLIC_API const char* FSE_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+*  FSE advanced functions
+******************************************/
+/*! FSE_compress2() :
+    Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
+    Both parameters can be defined as '0' to mean : use default value
+    @return : size of compressed data
+    Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!!
+                     if return == 1, srcData is a single byte value repeated srcSize times. Use RLE compression.
+                     if FSE_isError(return), it's an error code.
+*/
+FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*-*****************************************
+*  FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[]
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+/* *** COMPRESSION *** */
+
+/*! FSE_count():
+    Provides the precise count of each byte within a table 'count'.
+    'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
+    *maxSymbolValuePtr will be updated if detected smaller than initial value.
+    @return : the count of the most frequent symbol (which is not identified).
+              if return == srcSize, there is only one symbol.
+              Can also return an error code, which can be tested with FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+/*! FSE_optimalTableLog():
+    dynamically downsize 'tableLog' when conditions are met.
+    It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
+    @return : recommended tableLog (necessarily <= 'maxTableLog') */
+FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_normalizeCount():
+    normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
+    'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
+    @return : tableLog,
+              or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
+
+/*! FSE_NCountWriteBound():
+    Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
+    Typically useful for allocation purpose. */
+FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_writeNCount():
+    Compactly save 'normalizedCounter' into 'buffer'.
+    @return : size of the compressed table,
+              or an errorCode, which can be tested using FSE_isError(). */
+FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+
+/*! Constructor and Destructor of FSE_CTable.
+    Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable;   /* don't allocate that. It's only meant to be more restrictive than void* */
+FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog);
+FSE_PUBLIC_API void        FSE_freeCTable (FSE_CTable* ct);
+
+/*! FSE_buildCTable():
+    Builds `ct`, which must be already allocated, using FSE_createCTable().
+    @return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_compress_usingCTable():
+    Compress `src` using `ct` into `dst` which must be already allocated.
+    @return : size of compressed data (<= `dstCapacity`),
+              or 0 if compressed data could not fit into `dst`,
+              or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
+
+/*!
+Tutorial :
+----------
+The first step is to count all symbols. FSE_count() does this job very fast.
+Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
+'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
+maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
+FSE_count() will return the number of occurrences of the most frequent symbol.
+This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+The next step is to normalize the frequencies.
+FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
+It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
+You can use 'tableLog'==0 to mean "use default tableLog value".
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
+which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
+
+The result of FSE_normalizeCount() will be saved into a table,
+called 'normalizedCounter', which is a table of signed short.
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
+The return value is tableLog if everything proceeded as expected.
+It is 0 if there is a single symbol within the distribution.
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
+
+'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
+'buffer' must be already allocated.
+For guaranteed success, buffer size must be at least FSE_NCountWriteBound().
+The result of the function is the number of bytes written into 'buffer'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
+
+'normalizedCounter' can then be used to create the compression table 'CTable'.
+The space required by 'CTable' must be already allocated, using FSE_createCTable().
+You can then use FSE_buildCTable() to fill 'CTable'.
+If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
+
+'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
+Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
+The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
+If it returns '0', compressed data could not fit into 'dst'.
+If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
+*/
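+
+/*
+Condensed sketch of the detailed-API flow above (illustrative only, not part
+of the original header; src/dst buffer names are placeholders, and
+FSE_isError() checks are omitted for brevity) :
+
+    unsigned count[256];
+    unsigned maxSym = 255;
+    FSE_count(count, &maxSym, src, srcSize);
+    unsigned tableLog = FSE_optimalTableLog(0, srcSize, maxSym);
+    short norm[256];
+    FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym);
+    size_t hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSym, tableLog);
+    FSE_CTable* ct = FSE_createCTable(maxSym, tableLog);
+    FSE_buildCTable(ct, norm, maxSym, tableLog);
+    size_t cSize = FSE_compress_usingCTable((char*)dst + hSize,
+                                            dstCapacity - hSize,
+                                            src, srcSize, ct);
+    FSE_freeCTable(ct);
+*/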
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSE_readNCount():
+    Read compactly saved 'normalizedCounter' from 'rBuffer'.
+    @return : size read from 'rBuffer',
+              or an errorCode, which can be tested using FSE_isError().
+              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSE_DTable.
+    Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
+FSE_PUBLIC_API void        FSE_freeDTable(FSE_DTable* dt);
+
+/*! FSE_buildDTable():
+    Builds 'dt', which must be already allocated, using FSE_createDTable().
+    return : 0, or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSE_decompress_usingDTable():
+    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+    into `dst` which must be already allocated.
+    @return : size of regenerated data (necessarily <= `dstCapacity`),
+              or an errorCode, which can be tested using FSE_isError() */
+FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
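+
+/*
+Condensed sketch of the flow above (illustrative only; FSE_isError() checks
+omitted for brevity) :
+
+    short norm[256];
+    unsigned maxSym = 255, tableLog;
+    size_t hSize = FSE_readNCount(norm, &maxSym, &tableLog, cSrc, cSrcSize);
+    FSE_DTable* dt = FSE_createDTable(tableLog);
+    FSE_buildDTable(dt, norm, maxSym, tableLog);
+    size_t dSize = FSE_decompress_usingDTable(dst, dstCapacity,
+                                              (const char*)cSrc + hSize,
+                                              cSrcSize - hSize, dt);
+    FSE_freeDTable(dt);
+*/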
+
+#endif  /* FSE_H */
+
+#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY)
+#define FSE_H_FSE_STATIC_LINKING_ONLY
+
+/* *** Dependency *** */
+#include "bitstream.h"
+
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
+
+/* or use the size to malloc() space directly. Pay attention to alignment restrictions though */
+#define FSE_CTABLE_SIZE(maxTableLog, maxSymbolValue)   (FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) * sizeof(FSE_CTable))
+#define FSE_DTABLE_SIZE(maxTableLog)                   (FSE_DTABLE_SIZE_U32(maxTableLog) * sizeof(FSE_DTable))
+
+
+/* *****************************************
+*  FSE advanced API
+*******************************************/
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned
+ */
+size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* source, size_t sourceSize, unsigned* workSpace);
+
+/** FSE_countFast() :
+ *  same as FSE_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr
+ */
+size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of minimum `1024` unsigned
+ */
+size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);
+
+/*! FSE_count_simple() :
+ * Same as FSE_countFast(), but does not use any additional memory (not even on stack).
+ * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
+*/
+size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSE_optimalTableLog(), which uses `minus==2` */
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
+ */
+#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
+/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
+
+size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
+/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `(1<<tableLog)`.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
+
+size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
+/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
+
+typedef enum {
+   FSE_repeat_none,  /**< Cannot use the previous table */
+   FSE_repeat_check, /**< Can use the previous table but it must be checked */
+   FSE_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
+ } FSE_repeat;
+
+/* *****************************************
+*  FSE symbol compression API
+*******************************************/
+/*!
+   This API consists of small unitary functions, which highly benefit from being inlined.
+   Hence their bodies are included in the next section.
+*/
+typedef struct {
+    ptrdiff_t   value;
+    const void* stateTable;
+    const void* symbolTT;
+    unsigned    stateLog;
+} FSE_CState_t;
+
+static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
+
+static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
+
+static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
+
+/**<
+These functions are inner components of FSE_compress_usingCTable().
+They allow the creation of custom streams, mixing multiple tables and bit sources.
+
+A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
+So the first symbol you will encode is the last you will decode, like a LIFO stack.
+
+You will need a few variables to track your CStream. They are :
+
+FSE_CTable    ct;         // Provided by FSE_buildCTable()
+BIT_CStream_t bitStream;  // bitStream tracking structure
+FSE_CState_t  state;      // State tracking structure (can have several)
+
+
+The first thing to do is to init bitStream and state.
+    size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+    FSE_initCState(&state, ct);
+
+Note that BIT_initCStream() can produce an error code, so its result should be tested using FSE_isError().
+You can then encode your input data, byte after byte.
+FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
+Remember decoding will be done in reverse direction.
+    FSE_encodeSymbol(&bitStream, &state, symbol);
+
+At any time, you can also add any bit sequence.
+Note : maximum allowed nbBits is 25, for compatibility with 32-bit decoders
+    BIT_addBits(&bitStream, bitField, nbBits);
+
+The above methods don't commit data to memory, they just store it into a local register, for speed.
+The local register size is 64 bits on 64-bit systems, 32 bits on 32-bit systems (size_t).
+Writing data to memory is a manual operation, performed by the flushBits function.
+    BIT_flushBits(&bitStream);
+
+Your last FSE encoding operation shall be to flush your last state value(s).
+    FSE_flushCState(&bitStream, &state);
+
+Finally, you must close the bitStream.
+The function returns the size of CStream in bytes.
+If data couldn't fit into dstBuffer, it will return 0 (== not compressible).
+If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
+    size_t size = BIT_closeCStream(&bitStream);
+*/
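+
+/* Illustrative sketch (not part of the library) : the walkthrough above,
+ * assembled into the body of a hypothetical single-state encoding function.
+ * `src`, `srcSize`, `dstBuffer`, `maxDstSize` and `ct` are assumed
+ * caller-provided values; `ct` comes from FSE_buildCTable().
+ *
+ *    BIT_CStream_t bitStream;
+ *    FSE_CState_t  state;
+ *    const unsigned char* ip = (const unsigned char*)src + srcSize;
+ *
+ *    size_t const initError = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
+ *    if (FSE_isError(initError)) return 0;              // not enough space for a bitstream
+ *    FSE_initCState(&state, ct);
+ *
+ *    while (ip > (const unsigned char*)src) {
+ *        FSE_encodeSymbol(&bitStream, &state, *--ip);   // last byte first (LIFO)
+ *        BIT_flushBits(&bitStream);                     // commit the local register
+ *    }
+ *    FSE_flushCState(&bitStream, &state);
+ *    return BIT_closeCStream(&bitStream);               // 0 means not compressible
+ */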
+
+
+/* *****************************************
+*  FSE symbol decompression API
+*******************************************/
+typedef struct {
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+/**<
+Let's now decompose FSE_decompress_usingDTable() into its unitary components.
+You will decode FSE-encoded symbols from the bitStream,
+and also any other bitFields you put in, **in reverse order**.
+
+You will need a few variables to track your bitStream. They are :
+
+BIT_DStream_t DStream;    // Stream context
+FSE_DState_t  DState;     // State context. Multiple ones are possible
+FSE_DTable*   DTablePtr;  // Decoding table, provided by FSE_buildDTable()
+
+The first thing to do is to init the bitStream.
+    errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
+
+You should then retrieve your initial state(s)
+(in reverse flushing order if you have several) :
+    errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
+
+You can then decode your data, symbol after symbol.
+For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
+Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
+    unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
+
+You can retrieve any bitfield you may have stored into the bitStream (in reverse order).
+Note : maximum allowed nbBits is 25, for 32-bit compatibility
+    size_t bitField = BIT_readBits(&DStream, nbBits);
+
+All above operations only read from local register (which size depends on size_t).
+Refueling the register from memory is manually performed by the reload method.
+    endSignal = BIT_reloadDStream(&DStream);
+
+BIT_reloadDStream() tells whether there is still data left to read from DStream.
+BIT_DStream_unfinished : there is still some data left in the DStream.
+BIT_DStream_endOfBuffer : DStream reached the end of its buffer. Its container may no longer be completely filled.
+BIT_DStream_completed : DStream reached its exact end, corresponding in general to decompression completed.
+BIT_DStream_tooFar : DStream went too far. Decompression result is corrupted.
+
+When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
+to properly detect the exact end of stream.
+After each decoded symbol, check if DStream is fully consumed using this simple test :
+    BIT_reloadDStream(&DStream) >= BIT_DStream_completed
+
+When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
+Checking if DStream has reached its end is performed by :
+    BIT_endOfDStream(&DStream);
+Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
+    FSE_endOfDState(&DState);
+*/
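+
+/* Illustrative sketch (not part of the library) : the walkthrough above,
+ * assembled into the body of a hypothetical single-state decoding function.
+ * `dst`, `originalSize`, `srcBuffer`, `srcSize` and `dt` are assumed
+ * caller-provided values; `dt` comes from FSE_buildDTable().
+ *
+ *    BIT_DStream_t DStream;
+ *    FSE_DState_t  DState;
+ *    unsigned char* op = (unsigned char*)dst;
+ *    unsigned char* const oend = op + originalSize;
+ *
+ *    size_t const initError = BIT_initDStream(&DStream, srcBuffer, srcSize);
+ *    if (FSE_isError(initError)) return initError;
+ *    FSE_initDState(&DState, &DStream, dt);
+ *
+ *    while (op < oend) {
+ *        *op++ = FSE_decodeSymbol(&DState, &DStream);   // forward order restored
+ *        BIT_reloadDStream(&DStream);                   // refill the local register
+ *    }
+ *    if (!BIT_endOfDStream(&DStream) || !FSE_endOfDState(&DState))
+ *        return ERROR(GENERIC);                         // stream corrupted
+ */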
+
+
+/* *****************************************
+*  FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+*  Implementation of inlined functions
+*******************************************/
+typedef struct {
+    int deltaFindState;
+    U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
+{
+    const void* ptr = ct;
+    const U16* u16ptr = (const U16*) ptr;
+    const U32 tableLog = MEM_read16(ptr);
+    statePtr->value = (ptrdiff_t)1<<tableLog;
+    statePtr->stateTable = u16ptr+2;
+    statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
+    statePtr->stateLog = tableLog;
+}
+
+
+/*! FSE_initCState2() :
+*   Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
+*   uses the smallest state value possible, saving the cost of this symbol */
+MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
+{
+    FSE_initCState(statePtr, ct);
+    {   const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+        const U16* stateTable = (const U16*)(statePtr->stateTable);
+        U32 nbBitsOut  = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
+        statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
+        statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+    }
+}
+
+MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
+{
+    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
+    const U16* const stateTable = (const U16*)(statePtr->stateTable);
+    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
+    BIT_addBits(bitC, statePtr->value, nbBitsOut);
+    statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
+}
+
+MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
+{
+    BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
+    BIT_flushBits(bitC);
+}
+
+
+/* ======    Decompression    ====== */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSE_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSE_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    return DInfo.symbol;
+}
+
+MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    size_t const lowBits = BIT_readBits(bitD, nbBits);
+    DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BIT_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+/*! FSE_decodeSymbolFast() :
+    unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#ifndef FSE_MAX_MEMORY_USAGE
+#  define FSE_MAX_MEMORY_USAGE 14
+#endif
+#ifndef FSE_DEFAULT_MEMORY_USAGE
+#  define FSE_DEFAULT_MEMORY_USAGE 13
+#endif
+
+/*!FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#ifndef FSE_MAX_SYMBOL_VALUE
+#  define FSE_MAX_SYMBOL_VALUE 255
+#endif
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif   /* !FSE_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#  error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
+
+
+#endif /* FSE_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/fse_compress.c b/vendor/github.com/DataDog/zstd/fse_compress.c
new file mode 100644
index 0000000..cb8f1fa
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fse_compress.c
@@ -0,0 +1,849 @@
+/* ******************************************************************
+   FSE : Finite State Entropy encoder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+/* FSE_buildCTable_wksp() :
+ * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` should be sized to handle the worst-case situation, which is `(1<<max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`.
+ * `workSpace` must also be properly aligned with FSE_FUNCTION_TYPE requirements.
+ */
+size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+    U32 const tableSize = 1 << tableLog;
+    U32 const tableMask = tableSize - 1;
+    void* const ptr = ct;
+    U16* const tableU16 = ( (U16*) ptr) + 2;
+    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
+    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+    U32 const step = FSE_TABLESTEP(tableSize);
+    U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
+
+    FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
+    U32 highThreshold = tableSize-1;
+
+    /* CTable header */
+    if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
+    tableU16[-2] = (U16) tableLog;
+    tableU16[-1] = (U16) maxSymbolValue;
+
+    /* For explanations on how to distribute symbol values over the table :
+    *  http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
+
+    /* symbol start positions */
+    {   U32 u;
+        cumul[0] = 0;
+        for (u=1; u<=maxSymbolValue+1; u++) {
+            if (normalizedCounter[u-1]==-1) {  /* Low proba symbol */
+                cumul[u] = cumul[u-1] + 1;
+                tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
+            } else {
+                cumul[u] = cumul[u-1] + normalizedCounter[u-1];
+        }   }
+        cumul[maxSymbolValue+1] = tableSize+1;
+    }
+
+    /* Spread symbols */
+    {   U32 position = 0;
+        U32 symbol;
+        for (symbol=0; symbol<=maxSymbolValue; symbol++) {
+            int nbOccurences;
+            for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
+                tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* Low proba area */
+        }   }
+
+        if (position!=0) return ERROR(GENERIC);   /* Must have gone through all positions */
+    }
+
+    /* Build table */
+    {   U32 u; for (u=0; u<tableSize; u++) {
+        FSE_FUNCTION_TYPE s = tableSymbol[u];   /* note : static analyzer may not understand tableSymbol is properly initialized */
+        tableU16[cumul[s]++] = (U16) (tableSize+u);   /* TableU16 : sorted by symbol order; gives next state value */
+    }   }
+
+    /* Build Symbol Transformation Table */
+    {   unsigned total = 0;
+        unsigned s;
+        for (s=0; s<=maxSymbolValue; s++) {
+            switch (normalizedCounter[s])
+            {
+            case  0: break;
+
+            case -1:
+            case  1:
+                symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
+                symbolTT[s].deltaFindState = total - 1;
+                total ++;
+                break;
+            default :
+                {
+                    U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
+                    U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
+                    symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
+                    symbolTT[s].deltaFindState = total - normalizedCounter[s];
+                    total +=  normalizedCounter[s];
+    }   }   }   }
+
+    return 0;
+}
+
+
+size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE];   /* memset() is not necessary, even if static analyzer complain about it */
+    return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
+}
+
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
+{
+    size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
+    return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND;  /* maxSymbolValue==0 ? use default */
+}
+
+static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
+                                       const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
+                                       unsigned writeIsSafe)
+{
+    BYTE* const ostart = (BYTE*) header;
+    BYTE* out = ostart;
+    BYTE* const oend = ostart + headerBufferSize;
+    int nbBits;
+    const int tableSize = 1 << tableLog;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    bitStream = 0;
+    bitCount  = 0;
+    /* Table Size */
+    bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
+    bitCount  += 4;
+
+    /* Init */
+    remaining = tableSize+1;   /* +1 for extra accuracy */
+    threshold = tableSize;
+    nbBits = tableLog+1;
+
+    while (remaining>1) {  /* stops at 1 */
+        if (previous0) {
+            unsigned start = charnum;
+            while (!normalizedCounter[charnum]) charnum++;
+            while (charnum >= start+24) {
+                start+=24;
+                bitStream += 0xFFFFU << bitCount;
+                if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                out[0] = (BYTE) bitStream;
+                out[1] = (BYTE)(bitStream>>8);
+                out+=2;
+                bitStream>>=16;
+            }
+            while (charnum >= start+3) {
+                start+=3;
+                bitStream += 3 << bitCount;
+                bitCount += 2;
+            }
+            bitStream += (charnum-start) << bitCount;
+            bitCount += 2;
+            if (bitCount>16) {
+                if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+                out[0] = (BYTE)bitStream;
+                out[1] = (BYTE)(bitStream>>8);
+                out += 2;
+                bitStream >>= 16;
+                bitCount -= 16;
+        }   }
+        {   int count = normalizedCounter[charnum++];
+            int const max = (2*threshold-1)-remaining;
+            remaining -= count < 0 ? -count : count;
+            count++;   /* +1 for extra accuracy */
+            if (count>=threshold) count += max;   /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
+            bitStream += count << bitCount;
+            bitCount  += nbBits;
+            bitCount  -= (count<max);
+            previous0  = (count==1);
+            if (remaining<1) return ERROR(GENERIC);
+            while (remaining<threshold) { nbBits--; threshold>>=1; }
+        }
+        if (bitCount>16) {
+            if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+            out[0] = (BYTE)bitStream;
+            out[1] = (BYTE)(bitStream>>8);
+            out += 2;
+            bitStream >>= 16;
+            bitCount -= 16;
+    }   }
+
+    /* flush remaining bitStream */
+    if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall);   /* Buffer overflow */
+    out[0] = (BYTE)bitStream;
+    out[1] = (BYTE)(bitStream>>8);
+    out+= (bitCount+7) /8;
+
+    if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);
+
+    return (out-ostart);
+}
+
+
+size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
+    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
+
+    if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
+        return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
+
+    return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
+}
+
+
+
+/*-**************************************************************
+*  Counting histogram
+****************************************************************/
+/*! FSE_count_simple
+    This function counts byte values within `src`, and stores the histogram into table `count`.
+    It doesn't use any additional memory.
+    But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
+    For this reason, prefer using a table `count` with 256 elements.
+    @return : count of most numerous element.
+*/
+size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
+                        const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* const end = ip + srcSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned max=0;
+
+    memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
+    if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
+
+    while (ip<end) {
+        assert(*ip <= maxSymbolValue);
+        count[*ip++]++;
+    }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+
+    { U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }
+
+    return (size_t)max;
+}
+
+
+/* FSE_count_parallel_wksp() :
+ * Same as FSE_count_parallel(), but using an externally provided scratch buffer.
+ * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`.
+ * @return : largest histogram frequency, or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
+static size_t FSE_count_parallel_wksp(
+                                unsigned* count, unsigned* maxSymbolValuePtr,
+                                const void* source, size_t sourceSize,
+                                unsigned checkMax, unsigned* const workSpace)
+{
+    const BYTE* ip = (const BYTE*)source;
+    const BYTE* const iend = ip+sourceSize;
+    unsigned maxSymbolValue = *maxSymbolValuePtr;
+    unsigned max=0;
+    U32* const Counting1 = workSpace;
+    U32* const Counting2 = Counting1 + 256;
+    U32* const Counting3 = Counting2 + 256;
+    U32* const Counting4 = Counting3 + 256;
+
+    memset(workSpace, 0, 4*256*sizeof(unsigned));
+
+    /* safety checks */
+    if (!sourceSize) {
+        memset(count, 0, maxSymbolValue + 1);
+        *maxSymbolValuePtr = 0;
+        return 0;
+    }
+    if (!maxSymbolValue) maxSymbolValue = 255;            /* 0 == default */
+
+    /* by stripes of 16 bytes */
+    {   U32 cached = MEM_read32(ip); ip += 4;
+        while (ip < iend-15) {
+            U32 c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+            c = cached; cached = MEM_read32(ip); ip += 4;
+            Counting1[(BYTE) c     ]++;
+            Counting2[(BYTE)(c>>8) ]++;
+            Counting3[(BYTE)(c>>16)]++;
+            Counting4[       c>>24 ]++;
+        }
+        ip-=4;
+    }
+
+    /* finish last symbols */
+    while (ip<iend) Counting1[*ip++]++;
+
+    if (checkMax) {   /* verify stats will fit into destination table */
+        U32 s; for (s=255; s>maxSymbolValue; s--) {
+            Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+            if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
+    }   }
+
+    {   U32 s;
+        if (maxSymbolValue > 255) maxSymbolValue = 255;
+        for (s=0; s<=maxSymbolValue; s++) {
+            count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
+            if (count[s] > max) max = count[s];
+    }   }
+
+    while (!count[maxSymbolValue]) maxSymbolValue--;
+    *maxSymbolValuePtr = maxSymbolValue;
+    return (size_t)max;
+}
+
+/* FSE_countFast_wksp() :
+ * Same as FSE_countFast(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned */
+size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                          const void* source, size_t sourceSize,
+                          unsigned* workSpace)
+{
+    if (sourceSize < 1500) /* heuristic threshold */
+        return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
+    return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
+}
+
+/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+size_t FSE_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+                     const void* source, size_t sourceSize)
+{
+    unsigned tmpCounters[1024];
+    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
+}
+
+/* FSE_count_wksp() :
+ * Same as FSE_count(), but using an externally provided scratch buffer.
+ * `workSpace` must be a table of at least `1024` unsigned */
+size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* source, size_t sourceSize, unsigned* workSpace)
+{
+    if (*maxSymbolValuePtr < 255)
+        return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
+    *maxSymbolValuePtr = 255;
+    return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
+}
+
+size_t FSE_count(unsigned* count, unsigned* maxSymbolValuePtr,
+                 const void* src, size_t srcSize)
+{
+    unsigned tmpCounters[1024];
+    return FSE_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
+}
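+
+/* Illustrative usage sketch (disabled; `src` / `srcSize` are hypothetical) :
+ * building a byte histogram with the safe variant FSE_count(). */
+#if 0
+static void FSE_count_example(const void* src, size_t srcSize)
+{
+    unsigned count[256];
+    unsigned maxSymbolValue = 255;   /* count[] provides 256 cells */
+    size_t const maxCount = FSE_count(count, &maxSymbolValue, src, srcSize);
+    if (FSE_isError(maxCount)) return;   /* e.g. maxSymbolValue_tooSmall */
+    /* on success : count[s] is the frequency of byte value s,
+     * maxSymbolValue is the largest value present,
+     * maxCount is the frequency of the most common byte */
+}
+#endif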
+
+
+
+/*-**************************************************************
+*  FSE Compression Code
+****************************************************************/
+/*! FSE_sizeof_CTable() :
+    FSE_CTable is a variable size structure which contains :
+    `U16 tableLog;`
+    `U16 maxSymbolValue;`
+    `U16 nextStateNumber[1 << tableLog];`                         // This size is variable
+    `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];`  // This size is variable
+Allocation is manual (C standard does not support variable-size structures).
+*/
+size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+}
+
+FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
+{
+    size_t size;
+    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+    size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
+    return (FSE_CTable*)malloc(size);
+}
+
+void FSE_freeCTable (FSE_CTable* ct) { free(ct); }
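+
+/* Illustrative usage sketch (disabled) : manual CTable lifecycle for
+ * hypothetical `maxSymbolValue` / `tableLog` parameters. */
+#if 0
+static void FSE_ctable_example(unsigned maxSymbolValue, unsigned tableLog)
+{
+    FSE_CTable* const ct = FSE_createCTable(maxSymbolValue, tableLog);
+    if (ct == NULL) return;   /* allocation failure */
+    /* ... fill via FSE_buildCTable(), use via FSE_compress_usingCTable() ... */
+    FSE_freeCTable(ct);
+}
+#endif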
+
+/* provides the minimum logSize to safely represent a distribution */
+static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
+{
+    U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
+    U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
+    U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
+    assert(srcSize > 1); /* Not supported, RLE should be used instead */
+    return minBits;
+}
+
+unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
+{
+    U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
+    U32 tableLog = maxTableLog;
+    U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
+    assert(srcSize > 1); /* Not supported, RLE should be used instead */
+    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+    if (maxBitsSrc < tableLog) tableLog = maxBitsSrc;   /* Accuracy can be reduced */
+    if (minBits > tableLog) tableLog = minBits;   /* Need a minimum to safely represent all symbol values */
+    if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
+    if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
+    return tableLog;
+}
+
+unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
+}
+
+
+/* Secondary normalization method.
+   To be used when primary method fails. */
+
+static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
+{
+    short const NOT_YET_ASSIGNED = -2;
+    U32 s;
+    U32 distributed = 0;
+    U32 ToDistribute;
+
+    /* Init */
+    U32 const lowThreshold = (U32)(total >> tableLog);
+    U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
+
+    for (s=0; s<=maxSymbolValue; s++) {
+        if (count[s] == 0) {
+            norm[s]=0;
+            continue;
+        }
+        if (count[s] <= lowThreshold) {
+            norm[s] = -1;
+            distributed++;
+            total -= count[s];
+            continue;
+        }
+        if (count[s] <= lowOne) {
+            norm[s] = 1;
+            distributed++;
+            total -= count[s];
+            continue;
+        }
+
+        norm[s]=NOT_YET_ASSIGNED;
+    }
+    ToDistribute = (1 << tableLog) - distributed;
+
+    if ((total / ToDistribute) > lowOne) {
+        /* risk of rounding to zero */
+        lowOne = (U32)((total * 3) / (ToDistribute * 2));
+        for (s=0; s<=maxSymbolValue; s++) {
+            if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
+                norm[s] = 1;
+                distributed++;
+                total -= count[s];
+                continue;
+        }   }
+        ToDistribute = (1 << tableLog) - distributed;
+    }
+
+    if (distributed == maxSymbolValue+1) {
+        /* all values are pretty poor;
+           probably incompressible data (should have already been detected);
+           find max, then give all remaining points to max */
+        U32 maxV = 0, maxC = 0;
+        for (s=0; s<=maxSymbolValue; s++)
+            if (count[s] > maxC) { maxV=s; maxC=count[s]; }
+        norm[maxV] += (short)ToDistribute;
+        return 0;
+    }
+
+    if (total == 0) {
+        /* all symbols were low enough to qualify for the lowOne or lowThreshold cases */
+        for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
+            if (norm[s] > 0) { ToDistribute--; norm[s]++; }
+        return 0;
+    }
+
+    {   U64 const vStepLog = 62 - tableLog;
+        U64 const mid = (1ULL << (vStepLog-1)) - 1;
+        U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total;   /* scale on remaining */
+        U64 tmpTotal = mid;
+        for (s=0; s<=maxSymbolValue; s++) {
+            if (norm[s]==NOT_YET_ASSIGNED) {
+                U64 const end = tmpTotal + (count[s] * rStep);
+                U32 const sStart = (U32)(tmpTotal >> vStepLog);
+                U32 const sEnd = (U32)(end >> vStepLog);
+                U32 const weight = sEnd - sStart;
+                if (weight < 1)
+                    return ERROR(GENERIC);
+                norm[s] = (short)weight;
+                tmpTotal = end;
+    }   }   }
+
+    return 0;
+}
+
+
+size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
+                           const unsigned* count, size_t total,
+                           unsigned maxSymbolValue)
+{
+    /* Sanity checks */
+    if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
+    if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported size */
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported size */
+    if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC);   /* Too small tableLog, compression potentially impossible */
+
+    {   static U32 const rtbTable[] = {     0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
+        U64 const scale = 62 - tableLog;
+        U64 const step = ((U64)1<<62) / total;   /* <== here, one division ! */
+        U64 const vStep = 1ULL<<(scale-20);
+        int stillToDistribute = 1<<tableLog;
+        unsigned s;
+        unsigned largest=0;
+        short largestP=0;
+        U32 lowThreshold = (U32)(total >> tableLog);
+
+        for (s=0; s<=maxSymbolValue; s++) {
+            if (count[s] == total) return 0;   /* rle special case */
+            if (count[s] == 0) { normalizedCounter[s]=0; continue; }
+            if (count[s] <= lowThreshold) {
+                normalizedCounter[s] = -1;
+                stillToDistribute--;
+            } else {
+                short proba = (short)((count[s]*step) >> scale);
+                if (proba<8) {
+                    U64 restToBeat = vStep * rtbTable[proba];
+                    proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
+                }
+                if (proba > largestP) { largestP=proba; largest=s; }
+                normalizedCounter[s] = proba;
+                stillToDistribute -= proba;
+        }   }
+        if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
+            /* corner case, need another normalization method */
+            size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
+            if (FSE_isError(errorCode)) return errorCode;
+        }
+        else normalizedCounter[largest] += (short)stillToDistribute;
+    }
+
+#if 0
+    {   /* Print Table (debug) */
+        U32 s;
+        U32 nTotal = 0;
+        for (s=0; s<=maxSymbolValue; s++)
+            printf("%3i: %4i \n", s, normalizedCounter[s]);
+        for (s=0; s<=maxSymbolValue; s++)
+            nTotal += abs(normalizedCounter[s]);
+        if (nTotal != (1U<<tableLog))
+            printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
+        getchar();
+    }
+#endif
+
+    return tableLog;
+}
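+
+/* Illustrative usage sketch (disabled) : normalizing a histogram produced by
+ * FSE_count() and serializing its description. `count`, `total` (== srcSize),
+ * `maxSymbolValue`, `op` and `oend` are hypothetical caller-provided values. */
+#if 0
+static size_t FSE_normalize_example(BYTE* op, BYTE* const oend,
+                                    const unsigned* count, size_t total,
+                                    unsigned maxSymbolValue)
+{
+    S16 norm[FSE_MAX_SYMBOL_VALUE+1];
+    unsigned const tableLog = FSE_optimalTableLog(0 /*default*/, total, maxSymbolValue);
+    size_t const err = FSE_normalizeCount(norm, tableLog, count, total, maxSymbolValue);
+    if (FSE_isError(err)) return err;
+    return FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog);
+}
+#endif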
+
+
+/* fake FSE_CTable, for raw (uncompressed) input */
+size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
+{
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    void* const ptr = ct;
+    U16* const tableU16 = ( (U16*) ptr) + 2;
+    void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1);   /* assumption : tableLog >= 1 */
+    FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);             /* min size */
+
+    /* header */
+    tableU16[-2] = (U16) nbBits;
+    tableU16[-1] = (U16) maxSymbolValue;
+
+    /* Build table */
+    for (s=0; s<tableSize; s++)
+        tableU16[s] = (U16)(tableSize + s);
+
+    /* Build Symbol Transformation Table */
+    {   const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
+        for (s=0; s<=maxSymbolValue; s++) {
+            symbolTT[s].deltaNbBits = deltaNbBits;
+            symbolTT[s].deltaFindState = s-1;
+    }   }
+
+    return 0;
+}
+
+/* fake FSE_CTable, for rle input (always same symbol) */
+size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
+{
+    void* ptr = ct;
+    U16* tableU16 = ( (U16*) ptr) + 2;
+    void* FSCTptr = (U32*)ptr + 2;
+    FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
+
+    /* header */
+    tableU16[-2] = (U16) 0;
+    tableU16[-1] = (U16) symbolValue;
+
+    /* Build table */
+    tableU16[0] = 0;
+    tableU16[1] = 0;   /* just in case */
+
+    /* Build Symbol Transformation Table */
+    symbolTT[symbolValue].deltaNbBits = 0;
+    symbolTT[symbolValue].deltaFindState = 0;
+
+    return 0;
+}
+
+
+static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
+                           const void* src, size_t srcSize,
+                           const FSE_CTable* ct, const unsigned fast)
+{
+    const BYTE* const istart = (const BYTE*) src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip=iend;
+
+    BIT_CStream_t bitC;
+    FSE_CState_t CState1, CState2;
+
+    /* init */
+    if (srcSize <= 2) return 0;
+    { size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
+      if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
+
+#define FSE_FLUSHBITS(s)  (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
+
+    if (srcSize & 1) {
+        FSE_initCState2(&CState1, ct, *--ip);
+        FSE_initCState2(&CState2, ct, *--ip);
+        FSE_encodeSymbol(&bitC, &CState1, *--ip);
+        FSE_FLUSHBITS(&bitC);
+    } else {
+        FSE_initCState2(&CState2, ct, *--ip);
+        FSE_initCState2(&CState1, ct, *--ip);
+    }
+
+    /* join to mod 4 */
+    srcSize -= 2;
+    if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) {  /* test bit 2 */
+        FSE_encodeSymbol(&bitC, &CState2, *--ip);
+        FSE_encodeSymbol(&bitC, &CState1, *--ip);
+        FSE_FLUSHBITS(&bitC);
+    }
+
+    /* 2 or 4 encoding per loop */
+    while ( ip>istart ) {
+
+        FSE_encodeSymbol(&bitC, &CState2, *--ip);
+
+        if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 )   /* this test must be static */
+            FSE_FLUSHBITS(&bitC);
+
+        FSE_encodeSymbol(&bitC, &CState1, *--ip);
+
+        if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) {  /* this test must be static */
+            FSE_encodeSymbol(&bitC, &CState2, *--ip);
+            FSE_encodeSymbol(&bitC, &CState1, *--ip);
+        }
+
+        FSE_FLUSHBITS(&bitC);
+    }
+
+    FSE_flushCState(&bitC, &CState2);
+    FSE_flushCState(&bitC, &CState1);
+    return BIT_closeCStream(&bitC);
+}
+
+size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
+                           const void* src, size_t srcSize,
+                           const FSE_CTable* ct)
+{
+    unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
+
+    if (fast)
+        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
+    else
+        return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
+}
+
+
+size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
+
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
+
+/* FSE_compress_wksp() :
+ * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
+ * `wkspSize` must be >= `FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)`.
+ */
+size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + dstSize;
+
+    U32   count[FSE_MAX_SYMBOL_VALUE+1];
+    S16   norm[FSE_MAX_SYMBOL_VALUE+1];
+    FSE_CTable* CTable = (FSE_CTable*)workSpace;
+    size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
+    void* scratchBuffer = (void*)(CTable + CTableSize);
+    size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
+
+    /* init conditions */
+    if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
+    if (srcSize <= 1) return 0;  /* Not compressible */
+    if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
+
+    /* Scan input and build symbol stats */
+    {   CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
+        if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
+        if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
+        if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
+    }
+
+    tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
+    CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
+
+    /* Write table description header */
+    {   CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+        op += nc_err;
+    }
+
+    /* Compress */
+    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
+    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
+        if (cSize == 0) return 0;   /* not enough space for compressed data */
+        op += cSize;
+    }
+
+    /* check compressibility */
+    if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
+
+    return op-ostart;
+}
+
+typedef struct {
+    FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+    BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
+} fseWkspMax_t;
+
+size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
+{
+    fseWkspMax_t scratchBuffer;
+    FSE_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE));   /* compilation failures here means scratchBuffer is not large enough */
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
+}
+
+size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
+}
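+
+/* Illustrative round-trip sketch (disabled) : one-shot compression then
+ * decompression of a hypothetical buffer `src` / `srcSize`; returns 1 on
+ * a successful round trip. */
+#if 0
+static int FSE_roundtrip_example(const void* src, size_t srcSize)
+{
+    size_t const dstCapacity = FSE_compressBound(srcSize);
+    void* const dst = malloc(dstCapacity);
+    void* const rt  = malloc(srcSize);
+    int ok = 0;
+    if (dst && rt) {
+        size_t const cSize = FSE_compress(dst, dstCapacity, src, srcSize);
+        /* cSize==0 : not compressible ; cSize==1 : rle ; both need another path */
+        if (!FSE_isError(cSize) && (cSize > 1)) {
+            size_t const dSize = FSE_decompress(rt, srcSize, dst, cSize);
+            ok = !FSE_isError(dSize) && (dSize == srcSize)
+              && (memcmp(src, rt, srcSize) == 0);
+        }
+    }
+    free(dst); free(rt);
+    return ok;
+}
+#endif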
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
diff --git a/vendor/github.com/DataDog/zstd/fse_decompress.c b/vendor/github.com/DataDog/zstd/fse_decompress.c
new file mode 100644
index 0000000..4c66c3b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/fse_decompress.c
@@ -0,0 +1,309 @@
+/* ******************************************************************
+   FSE : Finite State Entropy decoder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_isError ERR_isError
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+/* check and forward error code */
+#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+FSE_DTable* FSE_createDTable (unsigned tableLog)
+{
+    if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
+    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSE_freeDTable (FSE_DTable* dt)
+{
+    free(dt);
+}
+
+size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+
+    U32 const maxSV1 = maxSymbolValue + 1;
+    U32 const tableSize = 1 << tableLog;
+    U32 highThreshold = tableSize-1;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    {   FSE_DTableHeader DTableH;
+        DTableH.tableLog = (U16)tableLog;
+        DTableH.fastMode = 1;
+        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
+            U32 s;
+            for (s=0; s<maxSV1; s++) {
+                if (normalizedCounter[s]==-1) {
+                    tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+                    symbolNext[s] = 1;
+                } else {
+                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+                    symbolNext[s] = normalizedCounter[s];
+        }   }   }
+        memcpy(dt, &DTableH, sizeof(DTableH));
+    }
+
+    /* Spread symbols */
+    {   U32 const tableMask = tableSize-1;
+        U32 const step = FSE_TABLESTEP(tableSize);
+        U32 s, position = 0;
+        for (s=0; s<maxSV1; s++) {
+            int i;
+            for (i=0; i<normalizedCounter[s]; i++) {
+                tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }   }
+        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+    }
+
+    /* Build Decoding table */
+    {   U32 u;
+        for (u=0; u<tableSize; u++) {
+            FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+            U32 const nextState = symbolNext[symbol]++;
+            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+    }   }
+
+    return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/*-*******************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSV1 = tableMask+1;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<maxSV1; s++) {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSE_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BIT_DStream_t bitD;
+    FSE_DState_t state1;
+    FSE_DState_t state2;
+
+    /* Init */
+    CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+    FSE_initDState(&state1, &bitD, dt);
+    FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+    /* 4 symbols per loop */
+    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+        op[0] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[1] = FSE_GETSYMBOL(&state2);
+
+        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[3] = FSE_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+    while (1) {
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+        *op++ = FSE_GETSYMBOL(&state1);
+        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+            *op++ = FSE_GETSYMBOL(&state2);
+            break;
+        }
+
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+        *op++ = FSE_GETSYMBOL(&state2);
+        if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+            *op++ = FSE_GETSYMBOL(&state1);
+            break;
+    }   }
+
+    return op-ostart;
+}
+
+
+size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSE_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+    const U32 fastMode = DTableH->fastMode;
+
+    /* select fast mode (static) */
+    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSE_MAX_SYMBOL_VALUE+1];
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+
+    /* normal FSE decoding mode */
+    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSE_isError(NCountLength)) return NCountLength;
+    //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
+    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+    ip += NCountLength;
+    cSrcSize -= NCountLength;
+
+    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
+
+    return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */
+}
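+
+/* Illustrative usage sketch (disabled) : decompressing with a caller-provided
+ * DTable, capping tableLog at a hypothetical limit known from the encoder. */
+#if 0
+static size_t FSE_decompress_wksp_example(void* dst, size_t dstCapacity,
+                                          const void* cSrc, size_t cSrcSize)
+{
+    FSE_DTable dt[FSE_DTABLE_SIZE_U32(12)];   /* assumption : tableLog <= 12 */
+    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, 12);
+}
+#endif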
+
+
+typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
+{
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+}
+
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
diff --git a/vendor/github.com/DataDog/zstd/huf.h b/vendor/github.com/DataDog/zstd/huf.h
new file mode 100644
index 0000000..b4645b4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/huf.h
@@ -0,0 +1,327 @@
+/* ******************************************************************
+   Huffman coder, part of New Generation Entropy library
+   header file
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef HUF_H_298734234
+#define HUF_H_298734234
+
+/* *** Dependencies *** */
+#include <stddef.h>    /* size_t */
+
+
+/* *** library symbols visibility *** */
+/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
+ *        HUF symbols remain "private" (internal symbols for library only).
+ *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
+#  define HUF_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
+#else
+#  define HUF_PUBLIC_API
+#endif
+
+
+/* ========================== */
+/* ***  simple functions  *** */
+/* ========================== */
+
+/** HUF_compress() :
+ *  Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
+ * 'dst' buffer must be already allocated.
+ *  Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
+ * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
+ * @return : size of compressed data (<= `dstCapacity`).
+ *  Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
+ *                   if HUF_isError(return), compression failed (more details using HUF_getErrorName())
+ */
+HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
+                             const void* src, size_t srcSize);
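+
+/*  A minimal usage sketch (illustrative; buffer names are hypothetical;
+ *  `dst` must provide at least `dstCapacity` bytes) :
+ *
+ *      size_t const dstCapacity = HUF_compressBound(srcSize);
+ *      size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
+ *      if (HUF_isError(cSize)) { handle error }
+ *      if (cSize == 0) { store src uncompressed }
+ */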
+
+/** HUF_decompress() :
+ *  Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+ *  into already allocated buffer 'dst', of minimum size 'originalSize'.
+ * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
+ *  Note : in contrast with FSE, HUF_decompress can regenerate
+ *         RLE (cSrcSize==1) and uncompressed (cSrcSize==originalSize) data,
+ *         because it knows the size to regenerate (originalSize).
+ * @return : size of regenerated data (== originalSize),
+ *           or an error code, which can be tested using HUF_isError()
+ */
+HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
+                               const void* cSrc, size_t cSrcSize);
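+
+/*  A minimal usage sketch : `originalSize` must be transmitted by the caller's
+ *  own means, e.g. stored in a surrounding frame header (names hypothetical) :
+ *
+ *      size_t const r = HUF_decompress(dst, originalSize, cSrc, cSrcSize);
+ *      if (HUF_isError(r)) { handle corruption }
+ *      // on success, r == originalSize
+ */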
+
+
+/* ***   Tool functions *** */
+#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
+HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
+
+/* Error Management */
+HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
+HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
+
+
+/* ***   Advanced function   *** */
+
+/** HUF_compress2() :
+ *  Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
+ * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
+ * `tableLog` must be `<= HUF_TABLELOG_MAX` . */
+HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize,
+                               unsigned maxSymbolValue, unsigned tableLog);
+
+/** HUF_compress4X_wksp() :
+ *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
+ * `workSpace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE (6 << 10)
+#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
+HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     unsigned maxSymbolValue, unsigned tableLog,
+                                     void* workSpace, size_t wkspSize);
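+
+/*  Illustrative sketch with a stack workspace (same pattern as the library's
+ *  own convenience wrappers; 255 and 11 are the default maxSymbolValue and
+ *  tableLog, per HUF_SYMBOLVALUE_MAX and HUF_TABLELOG_DEFAULT below) :
+ *
+ *      unsigned wksp[HUF_WORKSPACE_SIZE_U32];
+ *      size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
+ *                                               255, 11, wksp, sizeof(wksp));
+ */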
+
+#endif   /* HUF_H_298734234 */
+
+/* ******************************************************************
+ *  WARNING !!
+ *  The following section contains advanced and experimental definitions
+ *  which shall never be used in the context of a dynamic library,
+ *  because they are not guaranteed to remain stable in the future.
+ *  Only consider them in association with static linking.
+ * *****************************************************************/
+#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
+#define HUF_H_HUF_STATIC_LINKING_ONLY
+
+/* *** Dependencies *** */
+#include "mem.h"   /* U32 */
+
+
+/* *** Constants *** */
+#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
+#define HUF_SYMBOLVALUE_MAX  255
+
+#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_TABLELOG_MAX. Beyond that value, code does not work */
+#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+#  error "HUF_TABLELOG_MAX is too large !"
+#endif
+
+
+/* ****************************************
+*  Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's Compression Table */
+#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
+    void* name##hv = &(name##hb); \
+    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUF_DTable;
+#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
+
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
+size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
+size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+
+
+/* ****************************************
+ *  HUF detailed API
+ * ****************************************/
+
+/*! HUF_compress() does the following:
+ *  1. count symbol occurrences from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ *  2. (optional) refine tableLog using HUF_optimalTableLog()
+ *  3. build Huffman table from count using HUF_buildCTable()
+ *  4. save Huffman table to memory buffer using HUF_writeCTable()
+ *  5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ *  The following API allows targeting specific sub-functions for advanced tasks.
+ *  For example, it's possible to compress several blocks using the same 'CTable',
+ *  or to save and regenerate 'CTable' using external methods.
+ */
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
+typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
+size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
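+
+/*  A minimal sketch of the five steps above (error checks elided; `op` (a
+ *  BYTE*), `dstCapacity`, `src` and `srcSize` are hypothetical; FSE_count()
+ *  comes from "fse.h") :
+ *
+ *      unsigned count[HUF_SYMBOLVALUE_MAX+1];
+ *      unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+ *      HUF_CREATE_STATIC_CTABLE(ct, HUF_SYMBOLVALUE_MAX);
+ *      FSE_count(count, &maxSymbolValue, src, srcSize);                      // 1
+ *      unsigned tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT,
+ *                                              srcSize, maxSymbolValue);    // 2
+ *      tableLog = (unsigned)HUF_buildCTable(ct, count,
+ *                                           maxSymbolValue, tableLog);      // 3  (returns actual maxNbBits)
+ *      size_t const hSize = HUF_writeCTable(op, dstCapacity, ct,
+ *                                           maxSymbolValue, tableLog);      // 4
+ *      HUF_compress4X_usingCTable(op+hSize, dstCapacity-hSize,
+ *                                 src, srcSize, ct);                        // 5
+ */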
+
+typedef enum {
+   HUF_repeat_none,  /**< Cannot use the previous table */
+   HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+   HUF_repeat_valid  /**< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+/** HUF_compress4X_repeat() :
+ *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ *  If it uses hufTable it does not modify hufTable or repeat.
+ *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ *  If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+                       const void* src, size_t srcSize,
+                       unsigned maxSymbolValue, unsigned tableLog,
+                       void* workSpace, size_t wkspSize,    /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+/** HUF_buildCTable_wksp() :
+ *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
+#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);
+
+/*! HUF_readStats() :
+ *  Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src` , or an error Code .
+ *  Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+                     U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize);
+
+/** HUF_readCTable() :
+ *  Loading a CTable saved with HUF_writeCTable() */
+size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+
+
+/*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+ * 2. build Huffman table from the saved representation, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
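+
+/*  A minimal sketch of that sequence : HUF_decompress4X2()/HUF_decompress4X4()
+ *  perform steps 2 and 3 internally, so selection is the only explicit step
+ *  (error checks elided, names hypothetical) :
+ *
+ *      U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+ *      size_t const r = algoNb ? HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize)
+ *                              : HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
+ */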
+
+/** HUF_selectDecoder() :
+ *  Tells which decoder is likely to decode faster,
+ *  based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+ *  Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+/**
+ *  The minimum workspace size for the `workSpace` used in
+ *  HUF_readDTableX2_wksp() and HUF_readDTableX4_wksp().
+ *
+ *  The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ *  HUF_TABLELOG_MAX=12 to ~1850 bytes when HUF_TABLELOG_MAX=15.
+ *  Buffer overflow errors may potentially occur if code modifications result in
+ *  a required workspace size greater than that specified in the following
+ *  macro.
+ */
+#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
+#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
+size_t HUF_readDTableX4_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+
+
+/* ====================== */
+/* single stream variants */
+/* ====================== */
+
+size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+/** HUF_compress1X_repeat() :
+ *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ *  If it uses hufTable it does not modify hufTable or repeat.
+ *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ *  If preferRepeat then the old table will always be used if valid. */
+size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+                       const void* src, size_t srcSize,
+                       unsigned maxSymbolValue, unsigned tableLog,
+                       void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+                       HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+
+size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
+size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< single-symbol decoder */
+size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);   /**< double-symbols decoder */
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);   /**< automatic selection of single or double symbol decoder, based on DTable */
+size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
+
+/* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
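+
+/*  Illustrative dispatch sketch : `cpuHasBmi2()` stands for whatever runtime
+ *  feature detection the caller already has (hypothetical helper; bmi2==0 is
+ *  always a safe value) :
+ *
+ *      int const bmi2 = cpuHasBmi2();
+ *      size_t const r = HUF_decompress4X_usingDTable_bmi2(dst, dstCapacity,
+ *                                                 cSrc, cSrcSize, DTable, bmi2);
+ */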
+
+#endif /* HUF_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/huf_compress.c b/vendor/github.com/DataDog/zstd/huf_compress.c
new file mode 100644
index 0000000..83230b4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/huf_compress.c
@@ -0,0 +1,788 @@
+/* ******************************************************************
+   Huffman encoder, part of New Generation Entropy library
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+#include "bitstream.h"
+#include "compiler.h"
+#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
+#include "fse.h"        /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
+
+
+/* **************************************************************
+*  Utils
+****************************************************************/
+unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
+{
+    return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
+}
+
+
+/* *******************************************************
+*  HUF : Huffman block compression
+*********************************************************/
+/* HUF_compressWeights() :
+ * Same as FSE_compress(), but dedicated to huff0's weights compression.
+ * The use case needs much less stack memory.
+ * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
+ */
+#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
+size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + dstSize;
+
+    U32 maxSymbolValue = HUF_TABLELOG_MAX;
+    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
+
+    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
+    BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
+
+    U32 count[HUF_TABLELOG_MAX+1];
+    S16 norm[HUF_TABLELOG_MAX+1];
+
+    /* init conditions */
+    if (wtSize <= 1) return 0;  /* Not compressible */
+
+    /* Scan input and build symbol stats */
+    {   CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
+        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
+        if (maxCount == 1) return 0;   /* each symbol appears at most once => not compressible */
+    }
+
+    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
+    CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
+
+    /* Write table description header */
+    {   CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
+        op += hSize;
+    }
+
+    /* Compress */
+    CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
+    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
+        if (cSize == 0) return 0;   /* not enough space for compressed data */
+        op += cSize;
+    }
+
+    return op-ostart;
+}
+
+
+struct HUF_CElt_s {
+  U16  val;
+  BYTE nbBits;
+};   /* typedef'd to HUF_CElt within "huf.h" */
+
+/*! HUF_writeCTable() :
+    `CTable` : Huffman tree to save, using huf representation.
+    @return : size of saved CTable */
+size_t HUF_writeCTable (void* dst, size_t maxDstSize,
+                        const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
+{
+    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
+    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
+    BYTE* op = (BYTE*)dst;
+    U32 n;
+
+     /* check conditions */
+    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+
+    /* convert to weight */
+    bitsToWeight[0] = 0;
+    for (n=1; n<huffLog+1; n++)
+        bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
+    for (n=0; n<maxSymbolValue; n++)
+        huffWeight[n] = bitsToWeight[CTable[n].nbBits];
+
+    /* attempt weights compression by FSE */
+    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
+        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
+            op[0] = (BYTE)hSize;
+            return hSize+1;
+    }   }
+
+    /* write raw values as 4-bits (max : 15) */
+    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
+    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
+    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
+    huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
+    for (n=0; n<maxSymbolValue; n+=2)
+        op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
+    return ((maxSymbolValue+1)/2) + 1;
+}
+
+
+size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
+    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    U32 nbSymbols = 0;
+
+    /* get symbol weights */
+    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+
+    /* check result */
+    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);
+
+    /* Prepare base value per rank */
+    {   U32 n, nextRankStart = 0;
+        for (n=1; n<=tableLog; n++) {
+            U32 current = nextRankStart;
+            nextRankStart += (rankVal[n] << (n-1));
+            rankVal[n] = current;
+    }   }
+
+    /* fill nbBits */
+    {   U32 n; for (n=0; n<nbSymbols; n++) {
+            const U32 w = huffWeight[n];
+            CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
+    }   }
+
+    /* fill val */
+    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
+        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
+        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
+        /* determine starting value per rank */
+        valPerRank[tableLog+1] = 0;   /* for w==0 */
+        {   U16 min = 0;
+            U32 n; for (n=tableLog; n>0; n--) {  /* start at n=tablelog <-> w=1 */
+                valPerRank[n] = min;     /* get starting value within each rank */
+                min += nbPerRank[n];
+                min >>= 1;
+        }   }
+        /* assign value within rank, symbol order */
+        { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
+    }
+
+    *maxSymbolValuePtr = nbSymbols - 1;
+    return readSize;
+}
+
+
+typedef struct nodeElt_s {
+    U32 count;
+    U16 parent;
+    BYTE byte;
+    BYTE nbBits;
+} nodeElt;
+
+static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
+{
+    const U32 largestBits = huffNode[lastNonNull].nbBits;
+    if (largestBits <= maxNbBits) return largestBits;   /* early exit : no elt > maxNbBits */
+
+    /* several elements are too large (at least 2) */
+    {   int totalCost = 0;
+        const U32 baseCost = 1 << (largestBits - maxNbBits);
+        U32 n = lastNonNull;
+
+        while (huffNode[n].nbBits > maxNbBits) {
+            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
+            huffNode[n].nbBits = (BYTE)maxNbBits;
+            n --;
+        }  /* n stops at huffNode[n].nbBits <= maxNbBits */
+        while (huffNode[n].nbBits == maxNbBits) n--;   /* n ends at the index of the smallest symbol using < maxNbBits */
+
+        /* renorm totalCost */
+        totalCost >>= (largestBits - maxNbBits);  /* note : totalCost is necessarily a multiple of baseCost */
+
+        /* repay normalized cost */
+        {   U32 const noSymbol = 0xF0F0F0F0;
+            U32 rankLast[HUF_TABLELOG_MAX+2];
+            int pos;
+
+            /* Get pos of last (smallest) symbol per rank */
+            memset(rankLast, 0xF0, sizeof(rankLast));
+            {   U32 currentNbBits = maxNbBits;
+                for (pos=n ; pos >= 0; pos--) {
+                    if (huffNode[pos].nbBits >= currentNbBits) continue;
+                    currentNbBits = huffNode[pos].nbBits;   /* < maxNbBits */
+                    rankLast[maxNbBits-currentNbBits] = pos;
+            }   }
+
+            while (totalCost > 0) {
+                U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
+                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
+                    U32 highPos = rankLast[nBitsToDecrease];
+                    U32 lowPos = rankLast[nBitsToDecrease-1];
+                    if (highPos == noSymbol) continue;
+                    if (lowPos == noSymbol) break;
+                    {   U32 const highTotal = huffNode[highPos].count;
+                        U32 const lowTotal = 2 * huffNode[lowPos].count;
+                        if (highTotal <= lowTotal) break;
+                }   }
+                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
+                /* the HUF_TABLELOG_MAX test is just to please gcc 5+; it should not be necessary */
+                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
+                    nBitsToDecrease ++;
+                totalCost -= 1 << (nBitsToDecrease-1);
+                if (rankLast[nBitsToDecrease-1] == noSymbol)
+                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];   /* this rank is no longer empty */
+                huffNode[rankLast[nBitsToDecrease]].nbBits ++;
+                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
+                    rankLast[nBitsToDecrease] = noSymbol;
+                else {
+                    rankLast[nBitsToDecrease]--;
+                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
+                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
+            }   }   /* while (totalCost > 0) */
+
+            while (totalCost < 0) {  /* sometimes, the cost correction overshoots */
+                if (rankLast[1] == noSymbol) {  /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+                    while (huffNode[n].nbBits == maxNbBits) n--;
+                    huffNode[n+1].nbBits--;
+                    rankLast[1] = n+1;
+                    totalCost++;
+                    continue;
+                }
+                huffNode[ rankLast[1] + 1 ].nbBits--;
+                rankLast[1]++;
+                totalCost ++;
+    }   }   }   /* several elements are too large (at least 2) */
+
+    return maxNbBits;
+}
+
+
+typedef struct {
+    U32 base;
+    U32 current;
+} rankPos;
+
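+/* HUF_sort() :
+ * sorts huffNode[] by symbol count, in decreasing order.
+ * A first pass buckets symbols by BIT_highbit32(count+1), i.e. by order of
+ * magnitude; a second pass insertion-sorts each symbol within its bucket. */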
+static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
+{
+    rankPos rank[32];
+    U32 n;
+
+    memset(rank, 0, sizeof(rank));
+    for (n=0; n<=maxSymbolValue; n++) {
+        U32 r = BIT_highbit32(count[n] + 1);
+        rank[r].base ++;
+    }
+    for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
+    for (n=0; n<32; n++) rank[n].current = rank[n].base;
+    for (n=0; n<=maxSymbolValue; n++) {
+        U32 const c = count[n];
+        U32 const r = BIT_highbit32(c+1) + 1;
+        U32 pos = rank[r].current++;
+        while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) {
+            huffNode[pos] = huffNode[pos-1];
+            pos--;
+        }
+        huffNode[pos].count = c;
+        huffNode[pos].byte  = (BYTE)n;
+    }
+}
+
+
+/** HUF_buildCTable_wksp() :
+ *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of HUF_CTABLE_WORKSPACE_SIZE_U32 unsigned.
+ */
+#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
+typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
+size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+{
+    nodeElt* const huffNode0 = (nodeElt*)workSpace;
+    nodeElt* const huffNode = huffNode0+1;
+    U32 n, nonNullRank;
+    int lowS, lowN;
+    U16 nodeNb = STARTNODE;
+    U32 nodeRoot;
+
+    /* safety checks */
+    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
+    if (wkspSize < sizeof(huffNodeTable)) return ERROR(workSpace_tooSmall);
+    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+    memset(huffNode0, 0, sizeof(huffNodeTable));
+
+    /* sort, decreasing order */
+    HUF_sort(huffNode, count, maxSymbolValue);
+
+    /* init for parents */
+    nonNullRank = maxSymbolValue;
+    while(huffNode[nonNullRank].count == 0) nonNullRank--;
+    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
+    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
+    huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
+    nodeNb++; lowS-=2;
+    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
+    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */
+
+    /* create parents */
+    while (nodeNb <= nodeRoot) {
+        U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+        U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
+        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
+        huffNode[n1].parent = huffNode[n2].parent = nodeNb;
+        nodeNb++;
+    }
+
+    /* distribute weights (unlimited tree height) */
+    huffNode[nodeRoot].nbBits = 0;
+    for (n=nodeRoot-1; n>=STARTNODE; n--)
+        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+    for (n=0; n<=nonNullRank; n++)
+        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
+
+    /* enforce maxTableLog */
+    maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
+
+    /* fill result into tree (val, nbBits) */
+    {   U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+        U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+        if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */
+        for (n=0; n<=nonNullRank; n++)
+            nbPerRank[huffNode[n].nbBits]++;
+        /* determine starting value per rank */
+        {   U16 min = 0;
+            for (n=maxNbBits; n>0; n--) {
+                valPerRank[n] = min;      /* get starting value within each rank */
+                min += nbPerRank[n];
+                min >>= 1;
+        }   }
+        for (n=0; n<=maxSymbolValue; n++)
+            tree[huffNode[n].byte].nbBits = huffNode[n].nbBits;   /* push nbBits per symbol, symbol order */
+        for (n=0; n<=maxSymbolValue; n++)
+            tree[n].val = valPerRank[tree[n].nbBits]++;   /* assign value within rank, symbol order */
+    }
+
+    return maxNbBits;
+}
+
+/** HUF_buildCTable() :
+ * @return : maxNbBits
+ *  Note : count is used before tree is written, so they can safely overlap
+ */
+size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
+{
+    huffNodeTable nodeTable;
+    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
+}
+
+static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
+{
+    size_t nbBits = 0;
+    int s;
+    for (s = 0; s <= (int)maxSymbolValue; ++s) {
+        nbBits += CTable[s].nbBits * count[s];
+    }
+    return nbBits >> 3;
+}
+
+static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
+  int bad = 0;
+  int s;
+  for (s = 0; s <= (int)maxSymbolValue; ++s) {
+    bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
+  }
+  return !bad;
+}
+
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
+
+FORCE_INLINE_TEMPLATE void
+HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
+{
+    BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
+}
+
+#define HUF_FLUSHBITS(s)  BIT_flushBits(s)
+
+#define HUF_FLUSHBITS_1(stream) \
+    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
+
+#define HUF_FLUSHBITS_2(stream) \
+    if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
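+
+/* HUF_FLUSHBITS_1() and HUF_FLUSHBITS_2() flush only when the bit accumulator
+ * could otherwise overflow : the conditions are compile-time constants, so on
+ * 64-bit targets (where 4 symbols of HUF_TABLELOG_MAX bits always fit between
+ * unconditional flushes) the intermediate flushes compile away entirely. */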
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
+                                   const void* src, size_t srcSize,
+                                   const HUF_CElt* CTable)
+{
+    const BYTE* ip = (const BYTE*) src;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart;
+    size_t n;
+    BIT_CStream_t bitC;
+
+    /* init */
+    if (dstSize < 8) return 0;   /* not enough space to compress */
+    { size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
+      if (HUF_isError(initErr)) return 0; }
+
+    n = srcSize & ~3;  /* round srcSize down to a multiple of 4 */
+    switch (srcSize & 3)
+    {
+        case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
+                 HUF_FLUSHBITS_2(&bitC);
+                 /* fall-through */
+        case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
+                 HUF_FLUSHBITS_1(&bitC);
+                 /* fall-through */
+        case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
+                 HUF_FLUSHBITS(&bitC);
+                 /* fall-through */
+        case 0 : /* fall-through */
+        default: break;
+    }
+
+    for (; n>0; n-=4) {  /* note : n&3==0 at this stage */
+        HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
+        HUF_FLUSHBITS_1(&bitC);
+        HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
+        HUF_FLUSHBITS_2(&bitC);
+        HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
+        HUF_FLUSHBITS_1(&bitC);
+        HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
+        HUF_FLUSHBITS(&bitC);
+    }
+
+    return BIT_closeCStream(&bitC);
+}
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
+                                   const void* src, size_t srcSize,
+                                   const HUF_CElt* CTable)
+{
+    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
+                                      const void* src, size_t srcSize,
+                                      const HUF_CElt* CTable)
+{
+    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+                              const void* src, size_t srcSize,
+                              const HUF_CElt* CTable, const int bmi2)
+{
+    if (bmi2) {
+        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
+    }
+    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
+}
+
+#else
+
+static size_t
+HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
+                              const void* src, size_t srcSize,
+                              const HUF_CElt* CTable, const int bmi2)
+{
+    (void)bmi2;
+    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
+}
+
+#endif
+
+size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+
+static size_t
+HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
+                              const void* src, size_t srcSize,
+                              const HUF_CElt* CTable, int bmi2)
+{
+    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
+    const BYTE* ip = (const BYTE*) src;
+    const BYTE* const iend = ip + srcSize;
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart;
+
+    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
+    if (srcSize < 12) return 0;   /* no saving possible : too small input */
+    op += 6;   /* jumpTable */
+
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
+        if (cSize==0) return 0;
+        assert(cSize <= 65535);
+        MEM_writeLE16(ostart, (U16)cSize);
+        op += cSize;
+    }
+
+    ip += segmentSize;
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
+        if (cSize==0) return 0;
+        assert(cSize <= 65535);
+        MEM_writeLE16(ostart+2, (U16)cSize);
+        op += cSize;
+    }
+
+    ip += segmentSize;
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, segmentSize, CTable, bmi2) );
+        if (cSize==0) return 0;
+        assert(cSize <= 65535);
+        MEM_writeLE16(ostart+4, (U16)cSize);
+        op += cSize;
+    }
+
+    ip += segmentSize;
+    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, oend-op, ip, iend-ip, CTable, bmi2) );
+        if (cSize==0) return 0;
+        op += cSize;
+    }
+
+    return op-ostart;
+}
+
+size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
+{
+    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
+}
+
+
+static size_t HUF_compressCTable_internal(
+                BYTE* const ostart, BYTE* op, BYTE* const oend,
+                const void* src, size_t srcSize,
+                unsigned singleStream, const HUF_CElt* CTable, const int bmi2)
+{
+    size_t const cSize = singleStream ?
+                         HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
+                         HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
+    if (HUF_isError(cSize)) { return cSize; }
+    if (cSize==0) { return 0; }   /* incompressible */
+    op += cSize;
+    /* check compressibility */
+    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
+    return op-ostart;
+}
+
+typedef struct {
+    U32 count[HUF_SYMBOLVALUE_MAX + 1];
+    HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
+    huffNodeTable nodeTable;
+} HUF_compress_tables_t;
+
+/* HUF_compress_internal() :
+ * `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+static size_t HUF_compress_internal (
+                void* dst, size_t dstSize,
+                const void* src, size_t srcSize,
+                unsigned maxSymbolValue, unsigned huffLog,
+                unsigned singleStream,
+                void* workSpace, size_t wkspSize,
+                HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
+                const int bmi2)
+{
+    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstSize;
+    BYTE* op = ostart;
+
+    /* checks & inits */
+    if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC);  /* must be aligned on 4-bytes boundaries */
+    if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
+    if (!srcSize) return 0;  /* Uncompressed */
+    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
+    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
+    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
+    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
+    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
+
+    /* Heuristic : If old table is valid, use it for small inputs */
+    if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
+        return HUF_compressCTable_internal(ostart, op, oend,
+                                           src, srcSize,
+                                           singleStream, oldHufTable, bmi2);
+    }
+
+    /* Scan input and build symbol stats */
+    {   CHECK_V_F(largest, FSE_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
+        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
+        if (largest <= (srcSize >> 7)+1) return 0;   /* heuristic : probably not compressible enough */
+    }
+
+    /* Check validity of previous table */
+    if ( repeat
+      && *repeat == HUF_repeat_check
+      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
+        *repeat = HUF_repeat_none;
+    }
+    /* Heuristic : use existing table for small inputs */
+    if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
+        return HUF_compressCTable_internal(ostart, op, oend,
+                                           src, srcSize,
+                                           singleStream, oldHufTable, bmi2);
+    }
+
+    /* Build Huffman Tree */
+    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
+    {   CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
+                                                maxSymbolValue, huffLog,
+                                                table->nodeTable, sizeof(table->nodeTable)) );
+        huffLog = (U32)maxBits;
+        /* Zero unused symbols in CTable, so we can check it for validity */
+        memset(table->CTable + (maxSymbolValue + 1), 0,
+               sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
+    }
+
+    /* Write table description header */
+    {   CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
+        /* Check if using previous huffman table is beneficial */
+        if (repeat && *repeat != HUF_repeat_none) {
+            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
+            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
+            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
+                return HUF_compressCTable_internal(ostart, op, oend,
+                                                   src, srcSize,
+                                                   singleStream, oldHufTable, bmi2);
+        }   }
+
+        /* Use the new huffman table */
+        if (hSize + 12ul >= srcSize) { return 0; }
+        op += hSize;
+        if (repeat) { *repeat = HUF_repeat_none; }
+        if (oldHufTable)
+            memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
+    }
+    return HUF_compressCTable_internal(ostart, op, oend,
+                                       src, srcSize,
+                                       singleStream, table->CTable, bmi2);
+}
+
+
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
+                      const void* src, size_t srcSize,
+                      unsigned maxSymbolValue, unsigned huffLog,
+                      void* workSpace, size_t wkspSize)
+{
+    return HUF_compress_internal(dst, dstSize, src, srcSize,
+                                 maxSymbolValue, huffLog, 1 /*single stream*/,
+                                 workSpace, wkspSize,
+                                 NULL, NULL, 0, 0 /*bmi2*/);
+}
+
+size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
+                      const void* src, size_t srcSize,
+                      unsigned maxSymbolValue, unsigned huffLog,
+                      void* workSpace, size_t wkspSize,
+                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+{
+    return HUF_compress_internal(dst, dstSize, src, srcSize,
+                                 maxSymbolValue, huffLog, 1 /*single stream*/,
+                                 workSpace, wkspSize, hufTable,
+                                 repeat, preferRepeat, bmi2);
+}
+
+size_t HUF_compress1X (void* dst, size_t dstSize,
+                 const void* src, size_t srcSize,
+                 unsigned maxSymbolValue, unsigned huffLog)
+{
+    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+    return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
+/* HUF_compress4X_wksp():
+ * compress input using 4 streams.
+ * provide workspace to generate compression tables */
+size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
+                      const void* src, size_t srcSize,
+                      unsigned maxSymbolValue, unsigned huffLog,
+                      void* workSpace, size_t wkspSize)
+{
+    return HUF_compress_internal(dst, dstSize, src, srcSize,
+                                 maxSymbolValue, huffLog, 0 /*4 streams*/,
+                                 workSpace, wkspSize,
+                                 NULL, NULL, 0, 0 /*bmi2*/);
+}
+
+/* HUF_compress4X_repeat():
+ * compress input using 4 streams.
+ * re-use an existing huffman compression table */
+size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
+                      const void* src, size_t srcSize,
+                      unsigned maxSymbolValue, unsigned huffLog,
+                      void* workSpace, size_t wkspSize,
+                      HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
+{
+    return HUF_compress_internal(dst, dstSize, src, srcSize,
+                                 maxSymbolValue, huffLog, 0 /* 4 streams */,
+                                 workSpace, wkspSize,
+                                 hufTable, repeat, preferRepeat, bmi2);
+}
+
+size_t HUF_compress2 (void* dst, size_t dstSize,
+                const void* src, size_t srcSize,
+                unsigned maxSymbolValue, unsigned huffLog)
+{
+    unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+    return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+}
+
+size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
+}
diff --git a/vendor/github.com/DataDog/zstd/huf_decompress.c b/vendor/github.com/DataDog/zstd/huf_decompress.c
new file mode 100644
index 0000000..73f5c46
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/huf_decompress.c
@@ -0,0 +1,1096 @@
+/* ******************************************************************
+   Huffman decoder, part of New Generation Entropy library
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Dependencies
+****************************************************************/
+#include <string.h>     /* memcpy, memset */
+#include "bitstream.h"  /* BIT_* */
+#include "compiler.h"
+#include "fse.h"        /* header compression */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "error_private.h"
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_isError ERR_isError
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+#define CHECK_F(f) { size_t const err_ = (f); if (HUF_isError(err_)) return err_; }
+
+
+/* **************************************************************
+*  Byte alignment for workSpace management
+****************************************************************/
+#define HUF_ALIGN(x, a)         HUF_ALIGN_MASK((x), (a) - 1)
+#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
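+/* HUF_ALIGN(x, a) rounds x up to the next multiple of a (a power of 2),
+ * e.g. HUF_ALIGN(5, 4) == 8 and HUF_ALIGN(8, 4) == 8. */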
+
+
+/*-***************************/
+/*  generic DTableDesc       */
+/*-***************************/
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
+{
+    DTableDesc dtd;
+    memcpy(&dtd, table, sizeof(dtd));
+    return dtd;
+}
+
+
+/*-***************************/
+/*  single-symbol decoding   */
+/*-***************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+
+size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize)
+{
+    U32 tableLog = 0;
+    U32 nbSymbols = 0;
+    size_t iSize;
+    void* const dtPtr = DTable + 1;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+
+    U32* rankVal;
+    BYTE* huffWeight;
+    size_t spaceUsed32 = 0;
+
+    rankVal = (U32 *)workSpace + spaceUsed32;
+    spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1;
+    huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32);
+    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+
+    HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
+    /* memset(huffWeight, 0, sizeof(huffWeight)); */   /* not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* Table header */
+    {   DTableDesc dtd = HUF_getDTableDesc(DTable);
+        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small; the Huffman tree cannot fit */
+        dtd.tableType = 0;
+        dtd.tableLog = (BYTE)tableLog;
+        memcpy(DTable, &dtd, sizeof(dtd));
+    }
+
+    /* Calculate starting value for each rank */
+    {   U32 n, nextRankStart = 0;
+        for (n=1; n<tableLog+1; n++) {
+            U32 const current = nextRankStart;
+            nextRankStart += (rankVal[n] << (n-1));
+            rankVal[n] = current;
+    }   }
+
+    /* fill DTable */
+    {   U32 n;
+        for (n=0; n<nbSymbols; n++) {
+            U32 const w = huffWeight[n];
+            U32 const length = (1 << w) >> 1;
+            U32 u;
+            HUF_DEltX2 D;
+            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+            for (u = rankVal[w]; u < rankVal[w] + length; u++)
+                dt[u] = D;
+            rankVal[w] += length;
+    }   }
+
+    return iSize;
+}
+
+size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_readDTableX2_wksp(DTable, src, srcSize,
+                                 workSpace, sizeof(workSpace));
+}
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
+
+FORCE_INLINE_TEMPLATE BYTE
+HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+    BYTE const c = dt[val].byte;
+    BIT_skipBits(Dstream, dt[val].nbBits);
+    return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr)  \
+    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
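+
+/* The numbered variants throttle decoding to the headroom guaranteed between
+ * bitstream reloads: _0 always decodes, _2 only on 64-bit targets, and _1
+ * additionally whenever HUF_TABLELOG_MAX <= 12, since two such lookups still
+ * fit in a 32-bit container's refill. */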
+
+HINT_INLINE size_t
+HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* [0-3] symbols remaining */
+    if (MEM_32bits())
+        while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd))
+            HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, no need to reload */
+    while (p < pEnd)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X2_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + dstSize;
+    const void* dtPtr = DTable + 1;
+    const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+    BIT_DStream_t bitD;
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    U32 const dtLog = dtd.tableLog;
+
+    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+    HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    return dstSize;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X2_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    /* Check */
+    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable + 1;
+        const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
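+        /* Compressed layout: a 6-byte jump table holding three little-endian
+         * U16 stream sizes, then the four streams back to back; the size of
+         * the 4th stream is implied by the remaining input. */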
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal = BIT_DStream_unfinished;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+        /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        while ( (endSignal==BIT_DStream_unfinished) && (op4<(oend-3)) ) {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+            BIT_reloadDStream(&bitD1);
+            BIT_reloadDStream(&bitD2);
+            BIT_reloadDStream(&bitD3);
+            BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        /* note : should not be necessary, since op# advance in lock-step and we control op4;
+         *        but curiously, binaries generated by gcc 7.2 & 7.3 with -mbmi2 run faster when >=1 test is present */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 supposed already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+          if (!endCheck) return ERROR(corruption_detected); }
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+FORCE_INLINE_TEMPLATE U32
+HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    size_t const val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);
+    }   }
+    return 1;
+}
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+HINT_INLINE size_t
+HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd,
+                const HUF_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to end : up to 2 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress1X4_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    BIT_DStream_t bitD;
+
+    /* Init */
+    CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) );
+
+    /* decode */
+    {   BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
+        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
+    }
+
+    /* check */
+    if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t
+HUF_decompress4X4_usingDTable_internal_body(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;
+        const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        size_t const segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUF_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
+        CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
+        CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
+        CHECK_F( BIT_initDStream(&bitD4, istart4, length4) );
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+          if (!endCheck) return ERROR(corruption_detected); }
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize,
+                                               const void *cSrc,
+                                               size_t cSrcSize,
+                                               const HUF_DTable *DTable);
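+
+/* X(fn) instantiates, for each decoder body, a default and a BMI2-targeted
+ * compilation of the same code plus a small dispatcher selecting between them
+ * at run time; when DYNAMIC_BMI2 is off, the bmi2 flag is simply ignored. */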
+#if DYNAMIC_BMI2
+
+#define X(fn)                                                               \
+                                                                            \
+    static size_t fn##_default(                                             \
+                  void* dst,  size_t dstSize,                               \
+            const void* cSrc, size_t cSrcSize,                              \
+            const HUF_DTable* DTable)                                       \
+    {                                                                       \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
+    }                                                                       \
+                                                                            \
+    static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2(                       \
+                  void* dst,  size_t dstSize,                               \
+            const void* cSrc, size_t cSrcSize,                              \
+            const HUF_DTable* DTable)                                       \
+    {                                                                       \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
+    }                                                                       \
+                                                                            \
+    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
+                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
+    {                                                                       \
+        if (bmi2) {                                                         \
+            return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable);         \
+        }                                                                   \
+        return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable);          \
+    }
+
+#else
+
+#define X(fn)                                                               \
+    static size_t fn(void* dst, size_t dstSize, void const* cSrc,           \
+                     size_t cSrcSize, HUF_DTable const* DTable, int bmi2)   \
+    {                                                                       \
+        (void)bmi2;                                                         \
+        return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable);             \
+    }
+
+#endif
+
+X(HUF_decompress1X2_usingDTable_internal)
+X(HUF_decompress4X2_usingDTable_internal)
+X(HUF_decompress1X4_usingDTable_internal)
+X(HUF_decompress4X4_usingDTable_internal)
+
+#undef X
+
+
+size_t HUF_decompress1X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+size_t HUF_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize, int bmi2)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX2_wksp (dctx, cSrc, cSrcSize,
+                                                workSpace, wkspSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
+{
+    return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0);
+}
+
+
+size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/* HUF_fillDTableX4Level2() :
+ * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUF_DEltX4 DElt;
+    U32 rankVal[HUF_TABLELOG_MAX + 1];
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1) {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    {   U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
+            const U32 symbol = sortedSymbols[s].symbol;
+            const U32 weight = sortedSymbols[s].weight;
+            const U32 nbBits = nbBitsBaseline - weight;
+            const U32 length = 1 << (sizeLog-nbBits);
+            const U32 start = rankVal[weight];
+            U32 i = start;
+            const U32 end = start + length;
+
+            MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+            DElt.nbBits = (BYTE)(nbBits + consumed);
+            DElt.length = 2;
+            do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+            rankVal[weight] += length;
+    }   }
+}
+
+typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1];
+typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX];
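+/* rankVal_t is indexed as rankVal[consumed][weight]: row 0 holds each
+ * weight's start position in the first-level table, and row `consumed` holds
+ * those positions rescaled (>> consumed) for a second-level table entered
+ * after `consumed` bits. */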
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUF_TABLELOG_MAX + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++) {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        } else {
+            HUF_DEltX4 DElt;
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits = (BYTE)(nbBits);
+            DElt.length = 1;
+            {   U32 const end = start + length;
+                U32 u;
+                for (u = start; u < end; u++) DTable[u] = DElt;
+        }   }
+        rankVal[weight] += length;
+    }
+}
+
+size_t HUF_readDTableX4_wksp(HUF_DTable* DTable, const void* src,
+                             size_t srcSize, void* workSpace,
+                             size_t wkspSize)
+{
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    U32 const maxTableLog = dtd.maxTableLog;
+    size_t iSize;
+    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
+    HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
+    U32 *rankStart;
+
+    rankValCol_t* rankVal;
+    U32* rankStats;
+    U32* rankStart0;
+    sortedSymbol_t* sortedSymbol;
+    BYTE* weightList;
+    size_t spaceUsed32 = 0;
+
+    rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32);
+    spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2;
+    rankStats = (U32 *)workSpace + spaceUsed32;
+    spaceUsed32 += HUF_TABLELOG_MAX + 1;
+    rankStart0 = (U32 *)workSpace + spaceUsed32;
+    spaceUsed32 += HUF_TABLELOG_MAX + 2;
+    sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t);
+    spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2;
+    weightList = (BYTE *)((U32 *)workSpace + spaceUsed32);
+    spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2;
+
+    if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge);
+
+    rankStart = rankStart0 + 1;
+    memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1));
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable));   /* if compiler fails here, assertion is wrong */
+    if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
+    /* memset(weightList, 0, sizeof(weightList)); */  /* not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
+
+    /* Get start index of each weight */
+    {   U32 w, nextRankStart = 0;
+        for (w=1; w<maxW+1; w++) {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0-weight symbols at the end of the sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {   U32 s;
+        for (s=0; s<nbSymbols; s++) {
+            U32 const w = weightList[s];
+            U32 const r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0-weight symbols; this is the beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {   U32* const rankVal0 = rankVal[0];
+        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
+            U32 nextRankVal = 0;
+            U32 w;
+            for (w=1; w<maxW+1; w++) {
+                U32 current = nextRankVal;
+                nextRankVal += rankStats[w] << (w+rescale);
+                rankVal0[w] = current;
+        }   }
+        {   U32 const minBits = tableLog+1 - maxW;
+            U32 consumed;
+            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+                U32* const rankValPtr = rankVal[consumed];
+                U32 w;
+                for (w = 1; w < maxW+1; w++) {
+                    rankValPtr[w] = rankVal0[w] >> consumed;
+    }   }   }   }
+
+    HUF_fillDTableX4(dt, maxTableLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    dtd.tableLog = (BYTE)maxTableLog;
+    dtd.tableType = 1;
+    memcpy(DTable, &dtd, sizeof(dtd));
+    return iSize;
+}
+
+size_t HUF_readDTableX4(HUF_DTable* DTable, const void* src, size_t srcSize)
+{
+  U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+  return HUF_readDTableX4_wksp(DTable, src, srcSize,
+                               workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 1) return ERROR(GENERIC);
+    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress1X4_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX4_wksp(DCtx, cSrc, cSrcSize,
+                                               workSpace, wkspSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress1X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress1X4_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X4_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+size_t HUF_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUF_DTable* DTable)
+{
+    DTableDesc dtd = HUF_getDTableDesc(DTable);
+    if (dtd.tableType != 1) return ERROR(GENERIC);
+    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+static size_t HUF_decompress4X4_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize, int bmi2)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUF_readDTableX4_wksp(dctx, cSrc, cSrcSize,
+                                         workSpace, wkspSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X4_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                   const void* cSrc, size_t cSrcSize,
+                                   void* workSpace, size_t wkspSize)
+{
+    return HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+
+size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+                              const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                       workSpace, sizeof(workSpace));
+}
+
+size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
+    return HUF_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const HUF_DTable* DTable)
+{
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const HUF_DTable* DTable)
+{
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
+                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
+}
+
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+/** HUF_selectDecoder() :
+ *  Tells which decoder is likely to decode faster,
+ *  based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
+ *  Assumption : 0 < dstSize <= 128 KB */
+U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+    assert(dstSize > 0);
+    assert(dstSize <= 128 KB);
+    /* decoder timing evaluation */
+    {   U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 */
+        U32 const D256 = (U32)(dstSize >> 8);
+        U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+        U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+        DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, to reduce cache eviction */
+        return DTime1 < DTime0;
+}   }
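+
+/* Worked example : dstSize = 64 KB and cSrcSize = 40 KB give Q = 10 and
+ * D256 = 256, so DTime0 = 1107 + 128*256 = 33875, while DTime1 =
+ * (2083 + 81*256) + ((2083 + 81*256) >> 3) = 25671 : the double-symbol
+ * decoder is selected. */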
+
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[2] = { HUF_decompress4X2, HUF_decompress4X4 };
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+    }
+}
+
+size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
+
+size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                         workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
+                                     size_t dstSize, const void* cSrc,
+                                     size_t cSrcSize, void* workSpace,
+                                     size_t wkspSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize == 0) return ERROR(corruption_detected);
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress4X4_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize):
+                        HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
+    }
+}
+
+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
+                                  const void* cSrc, size_t cSrcSize,
+                                  void* workSpace, size_t wkspSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress1X4_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize):
+                        HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
+                                cSrcSize, workSpace, wkspSize);
+    }
+}
+
+size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
+                             const void* cSrc, size_t cSrcSize)
+{
+    U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
+                                      workSpace, sizeof(workSpace));
+}
+
+
+size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+}
+
+size_t HUF_decompress1X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2);
+}
+
+size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
+{
+    DTableDesc const dtd = HUF_getDTableDesc(DTable);
+    return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
+                           HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
+}
+
+size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize == 0) return ERROR(corruption_detected);
+
+    {   U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUF_decompress4X4_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
+                        HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
+    }
+}
diff --git a/vendor/github.com/DataDog/zstd/mem.h b/vendor/github.com/DataDog/zstd/mem.h
new file mode 100644
index 0000000..47d2300
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/mem.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>     /* size_t, ptrdiff_t */
+#include <string.h>     /* memcpy */
+
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+#  define MEM_STATIC static __inline __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+/* code only tested on 32 and 64 bits systems */
+#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
+MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+  typedef   uint8_t BYTE;
+  typedef  uint16_t U16;
+  typedef   int16_t S16;
+  typedef  uint32_t U32;
+  typedef   int32_t S32;
+  typedef  uint64_t U64;
+  typedef   int64_t S64;
+#else
+  typedef unsigned char      BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/*-**************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It relies on a compiler extension (hence not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets that depend on alignment.
+ *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2).
+ */
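+/* For example, compiling with -DMEM_FORCE_MEMORY_ACCESS=0 on the command line
+ * forces the portable memcpy() path, overriding the automatic detection
+ * below. */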
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
+    return one.c[0];
+}
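+/* The union stores 1 as a U32 and inspects its first byte: 1 on little-endian
+ * targets, 0 on big-endian ones. Compilers typically fold the whole test to a
+ * compile-time constant. */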
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Use only when there is no other way to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
+    __pragma( pack(push, 1) )
+    typedef struct { U16 v; } unalign16;
+    typedef struct { U32 v; } unalign32;
+    typedef struct { U64 v; } unalign64;
+    typedef struct { size_t v; } unalignArch;
+    __pragma( pack(pop) )
+#else
+    typedef struct { U16 v; } __attribute__((packed)) unalign16;
+    typedef struct { U32 v; } __attribute__((packed)) unalign32;
+    typedef struct { U64 v; } __attribute__((packed)) unalign64;
+    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
+#endif
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC size_t MEM_readST(const void* memPtr)
+{
+    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write32(void* memPtr, U32 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write64(void* memPtr, U64 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
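+/* Although written as memcpy(), these fixed-size copies are recognized by
+ * mainstream compilers and typically lower to a single (unaligned) load or
+ * store instruction. */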
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_ulong(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+    return __builtin_bswap32(in);
+#else
+    return  ((in << 24) & 0xff000000 ) |
+            ((in <<  8) & 0x00ff0000 ) |
+            ((in >>  8) & 0x0000ff00 ) |
+            ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_uint64(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+    return __builtin_bswap64(in);
+#else
+    return  ((in << 56) & 0xff00000000000000ULL) |
+            ((in << 40) & 0x00ff000000000000ULL) |
+            ((in << 24) & 0x0000ff0000000000ULL) |
+            ((in << 8)  & 0x000000ff00000000ULL) |
+            ((in >> 8)  & 0x00000000ff000000ULL) |
+            ((in >> 24) & 0x0000000000ff0000ULL) |
+            ((in >> 40) & 0x000000000000ff00ULL) |
+            ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+MEM_STATIC size_t MEM_swapST(size_t in)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_swap32((U32)in);
+    else
+        return (size_t)MEM_swap64((U64)in);
+}
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian()) {
+        MEM_write16(memPtr, val);
+    } else {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE24(const void* memPtr)
+{
+    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+}
+
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
+{
+    MEM_writeLE16(memPtr, (U16)val);
+    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+        return MEM_swap32(MEM_read32(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
+{
+    if (MEM_isLittleEndian())
+        MEM_write32(memPtr, val32);
+    else
+        MEM_write32(memPtr, MEM_swap32(val32));
+}
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+        return MEM_swap64(MEM_read64(memPtr));
+}
+
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
+{
+    if (MEM_isLittleEndian())
+        MEM_write64(memPtr, val64);
+    else
+        MEM_write64(memPtr, MEM_swap64(val64));
+}
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
+{
+    if (MEM_32bits())
+        MEM_writeLE32(memPtr, (U32)val);
+    else
+        MEM_writeLE64(memPtr, (U64)val);
+}
+
+/*=== Big endian r/w ===*/
+
+MEM_STATIC U32 MEM_readBE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_swap32(MEM_read32(memPtr));
+    else
+        return MEM_read32(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
+{
+    if (MEM_isLittleEndian())
+        MEM_write32(memPtr, MEM_swap32(val32));
+    else
+        MEM_write32(memPtr, val32);
+}
+
+MEM_STATIC U64 MEM_readBE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_swap64(MEM_read64(memPtr));
+    else
+        return MEM_read64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
+{
+    if (MEM_isLittleEndian())
+        MEM_write64(memPtr, MEM_swap64(val64));
+    else
+        MEM_write64(memPtr, val64);
+}
+
+MEM_STATIC size_t MEM_readBEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readBE32(memPtr);
+    else
+        return (size_t)MEM_readBE64(memPtr);
+}
+
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
+{
+    if (MEM_32bits())
+        MEM_writeBE32(memPtr, (U32)val);
+    else
+        MEM_writeBE64(memPtr, (U64)val);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
diff --git a/vendor/github.com/DataDog/zstd/pool.c b/vendor/github.com/DataDog/zstd/pool.c
new file mode 100644
index 0000000..773488b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/pool.c
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ======   Dependencies   ======= */
+#include <stddef.h>  /* size_t */
+#include "pool.h"
+#include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)        /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+#ifdef ZSTD_MULTITHREAD
+
+#include "threading.h"   /* pthread adaptation */
+
+/* A job is a function and an opaque argument */
+typedef struct POOL_job_s {
+    POOL_function function;
+    void *opaque;
+} POOL_job;
+
+struct POOL_ctx_s {
+    ZSTD_customMem customMem;
+    /* Keep track of the threads */
+    ZSTD_pthread_t *threads;
+    size_t numThreads;
+
+    /* The queue is a circular buffer */
+    POOL_job *queue;
+    size_t queueHead;
+    size_t queueTail;
+    size_t queueSize;
+
+    /* The number of threads working on jobs */
+    size_t numThreadsBusy;
+    /* Indicates if the queue is empty */
+    int queueEmpty;
+
+    /* The mutex protects the queue */
+    ZSTD_pthread_mutex_t queueMutex;
+    /* Condition variable for pushers to wait on when the queue is full */
+    ZSTD_pthread_cond_t queuePushCond;
+    /* Condition variable for poppers to wait on when the queue is empty */
+    ZSTD_pthread_cond_t queuePopCond;
+    /* Indicates if the queue is shutting down */
+    int shutdown;
+};
+
+/* POOL_thread() :
+   Worker thread for the thread pool.
+   Waits for jobs and executes them.
+   @returns : NULL on failure else non-null.
+*/
+static void* POOL_thread(void* opaque) {
+    POOL_ctx* const ctx = (POOL_ctx*)opaque;
+    if (!ctx) { return NULL; }
+    for (;;) {
+        /* Lock the mutex and wait for a non-empty queue or until shutdown */
+        ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+
+        while (ctx->queueEmpty && !ctx->shutdown) {
+            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+        }
+        /* empty => shutting down: so stop */
+        if (ctx->queueEmpty) {
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+            return opaque;
+        }
+        /* Pop a job off the queue */
+        {   POOL_job const job = ctx->queue[ctx->queueHead];
+            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
+            ctx->numThreadsBusy++;
+            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
+            /* Unlock the mutex, signal a pusher, and run the job */
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+
+            job.function(job.opaque);
+
+            /* If the intended queue size was 0, signal after finishing job */
+            if (ctx->queueSize == 1) {
+                ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+                ctx->numThreadsBusy--;
+                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+        }   }
+    }  /* for (;;) */
+    /* Unreachable */
+}
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+    POOL_ctx* ctx;
+    /* Check the parameters */
+    if (!numThreads) { return NULL; }
+    /* Allocate the context and zero initialize */
+    ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
+    if (!ctx) { return NULL; }
+    /* Initialize the job queue.
+     * It allocates one extra slot, which is deliberately left unused so that
+     * an empty queue can be distinguished from a full one.
+     */
+    ctx->queueSize = queueSize + 1;
+    ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
+    ctx->queueHead = 0;
+    ctx->queueTail = 0;
+    ctx->numThreadsBusy = 0;
+    ctx->queueEmpty = 1;
+    (void)ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
+    (void)ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
+    (void)ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
+    ctx->shutdown = 0;
+    /* Allocate space for the thread handles */
+    ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
+    ctx->numThreads = 0;
+    ctx->customMem = customMem;
+    /* Check for errors */
+    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
+    /* Initialize the threads */
+    {   size_t i;
+        for (i = 0; i < numThreads; ++i) {
+            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+                ctx->numThreads = i;
+                POOL_free(ctx);
+                return NULL;
+        }   }
+        ctx->numThreads = numThreads;
+    }
+    return ctx;
+}
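The queue sizing above is the classic one-spare-slot ring buffer: `queueSize` slots are allocated but at most `queueSize - 1` jobs are ever stored, so full and empty states stay distinguishable. A standalone sketch of the head/tail arithmetic (illustrative only, not part of the vendored sources):

```c
#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t const queueSize = 4;   /* intended capacity 3, plus one spare slot */
    size_t head = 0, tail = 0;

    /* push three jobs: tail advances and wraps modulo queueSize */
    tail = (tail + 1) % queueSize;
    tail = (tail + 1) % queueSize;
    tail = (tail + 1) % queueSize;
    assert(head == (tail + 1) % queueSize);   /* full: one slot left unused */

    /* pop three jobs: head catches up with tail */
    head = (head + 1) % queueSize;
    head = (head + 1) % queueSize;
    head = (head + 1) % queueSize;
    assert(head == tail);                     /* empty again */
    return 0;
}
```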
+
+/*! POOL_join() :
+    Shutdown the queue, wake any sleeping threads, and join all of the threads.
+*/
+static void POOL_join(POOL_ctx* ctx) {
+    /* Shut down the queue */
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    ctx->shutdown = 1;
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    /* Wake up sleeping threads */
+    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+    /* Join all of the threads */
+    {   size_t i;
+        for (i = 0; i < ctx->numThreads; ++i) {
+            ZSTD_pthread_join(ctx->threads[i], NULL);
+    }   }
+}
+
+void POOL_free(POOL_ctx *ctx) {
+    if (!ctx) { return; }
+    POOL_join(ctx);
+    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
+    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
+    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
+    ZSTD_free(ctx->queue, ctx->customMem);
+    ZSTD_free(ctx->threads, ctx->customMem);
+    ZSTD_free(ctx, ctx->customMem);
+}
+
+size_t POOL_sizeof(POOL_ctx *ctx) {
+    if (ctx==NULL) return 0;  /* supports sizeof NULL */
+    return sizeof(*ctx)
+        + ctx->queueSize * sizeof(POOL_job)
+        + ctx->numThreads * sizeof(ZSTD_pthread_t);
+}
+
+/**
+ * Returns 1 if the queue is full and 0 otherwise.
+ *
+ * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
+ * then a queue is empty if there is a thread free and no job is waiting.
+ */
+static int isQueueFull(POOL_ctx const* ctx) {
+    if (ctx->queueSize > 1) {
+        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
+    } else {
+        return ctx->numThreadsBusy == ctx->numThreads ||
+               !ctx->queueEmpty;
+    }
+}
+
+
+static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+{
+    POOL_job const job = {function, opaque};
+    assert(ctx != NULL);
+    if (ctx->shutdown) return;
+
+    ctx->queueEmpty = 0;
+    ctx->queue[ctx->queueTail] = job;
+    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
+    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+    assert(ctx != NULL);
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    /* Wait until there is space in the queue for the new job */
+    while (isQueueFull(ctx) && (!ctx->shutdown)) {
+        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+    }
+    POOL_add_internal(ctx, function, opaque);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+    assert(ctx != NULL);
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    if (isQueueFull(ctx)) {
+        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+        return 0;
+    }
+    POOL_add_internal(ctx, function, opaque);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    return 1;
+}
+
+
+#else  /* ZSTD_MULTITHREAD  not defined */
+
+/* ========================== */
+/* No multi-threading support */
+/* ========================== */
+
+
+/* We don't need any data, but if it is empty, malloc() might return NULL. */
+struct POOL_ctx_s {
+    int dummy;
+};
+static POOL_ctx g_ctx;
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+    (void)numThreads;
+    (void)queueSize;
+    (void)customMem;
+    return &g_ctx;
+}
+
+void POOL_free(POOL_ctx* ctx) {
+    assert(!ctx || ctx == &g_ctx);
+    (void)ctx;
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
+    (void)ctx;
+    function(opaque);
+}
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
+    (void)ctx;
+    function(opaque);
+    return 1;
+}
+
+size_t POOL_sizeof(POOL_ctx* ctx) {
+    if (ctx==NULL) return 0;  /* supports sizeof NULL */
+    assert(ctx == &g_ctx);
+    return sizeof(*ctx);
+}
+
+#endif  /* ZSTD_MULTITHREAD */
diff --git a/vendor/github.com/DataDog/zstd/pool.h b/vendor/github.com/DataDog/zstd/pool.h
new file mode 100644
index 0000000..a57e9b4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/pool.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef POOL_H
+#define POOL_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include <stddef.h>   /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
+#include "zstd.h"
+
+typedef struct POOL_ctx_s POOL_ctx;
+
+/*! POOL_create() :
+ *  Create a thread pool with at most `numThreads` threads.
+ * `numThreads` must be at least 1.
+ *  The maximum number of queued jobs before blocking is `queueSize`.
+ * @return : POOL_ctx pointer on success, else NULL.
+*/
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
+
+/*! POOL_free() :
+    Free a thread pool returned by POOL_create().
+*/
+void POOL_free(POOL_ctx* ctx);
+
+/*! POOL_sizeof() :
+    return memory usage of pool returned by POOL_create().
+*/
+size_t POOL_sizeof(POOL_ctx* ctx);
+
+/*! POOL_function :
+    The function type that can be added to a thread pool.
+*/
+typedef void (*POOL_function)(void*);
+/*! POOL_add_function :
+    The function type for a generic thread pool add function.
+*/
+typedef void (*POOL_add_function)(void*, POOL_function, void*);
+
+/*! POOL_add() :
+    Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+    Possibly blocks until there is room in the queue.
+    Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
+*/
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+/*! POOL_tryAdd() :
+    Add the job `function(opaque)` to the thread pool if a worker is available.
+    Returns immediately otherwise.
+   @return : 1 if successful, 0 if not.
+*/
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
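A minimal usage sketch of this API, assuming the vendored pool.c and threading.c are compiled with `ZSTD_MULTITHREAD` defined; `printJob` and the job strings are hypothetical:

```c
#include <stdio.h>
#include "pool.h"

/* hypothetical job: opaque must stay valid until the job has run */
static void printJob(void* opaque)
{
    printf("job: %s\n", (const char*)opaque);
}

int main(void)
{
    /* two workers; up to four queued jobs before POOL_add() blocks */
    POOL_ctx* const pool = POOL_create(2, 4);
    if (pool == NULL) return 1;
    POOL_add(pool, printJob, "first");        /* may block when the queue is full */
    if (!POOL_tryAdd(pool, printJob, "second"))
        printJob("second");                   /* fallback: run inline */
    POOL_free(pool);   /* joins all workers before releasing memory */
    return 0;
}
```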
diff --git a/vendor/github.com/DataDog/zstd/threading.c b/vendor/github.com/DataDog/zstd/threading.c
new file mode 100644
index 0000000..8be8c8d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/threading.c
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+/**
+ * This file holds wrappers for systems which do not support pthreads.
+ */
+
+/* create a fake symbol to avoid an empty translation unit warning */
+int g_ZSTD_threading_useles_symbol;
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+
+
+/* ===  Dependencies  === */
+#include <process.h>
+#include <errno.h>
+#include "threading.h"
+
+
+/* ===  Implementation  === */
+
+static unsigned __stdcall worker(void *arg)
+{
+    ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg;
+    thread->arg = thread->start_routine(thread->arg);
+    return 0;
+}
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+            void* (*start_routine) (void*), void* arg)
+{
+    (void)unused;
+    thread->arg = arg;
+    thread->start_routine = start_routine;
+    thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL);
+
+    if (!thread->handle)
+        return errno;
+    else
+        return 0;
+}
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)
+{
+    DWORD result;
+
+    if (!thread.handle) return 0;
+
+    result = WaitForSingleObject(thread.handle, INFINITE);
+    switch (result) {
+    case WAIT_OBJECT_0:
+        if (value_ptr) *value_ptr = thread.arg;
+        return 0;
+    case WAIT_ABANDONED:
+        return EINVAL;
+    default:
+        return GetLastError();
+    }
+}
+
+#endif   /* ZSTD_MULTITHREAD && _WIN32 */
diff --git a/vendor/github.com/DataDog/zstd/threading.h b/vendor/github.com/DataDog/zstd/threading.h
new file mode 100644
index 0000000..d806c89
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/threading.h
@@ -0,0 +1,123 @@
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ */
+
+#ifndef THREADING_H_938743
+#define THREADING_H_938743
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper, based on :
+ * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ */
+#ifdef WINVER
+#  undef WINVER
+#endif
+#define WINVER       0x0600
+
+#ifdef _WIN32_WINNT
+#  undef _WIN32_WINNT
+#endif
+#define _WIN32_WINNT 0x0600
+
+#ifndef WIN32_LEAN_AND_MEAN
+#  define WIN32_LEAN_AND_MEAN
+#endif
+
+#undef ERROR   /* reported already defined on VS 2015 (Rich Geldreich) */
+#include <windows.h>
+#undef ERROR
+#define ERROR(name) ZSTD_ERROR(name)
+
+
+/* mutex */
+#define ZSTD_pthread_mutex_t           CRITICAL_SECTION
+#define ZSTD_pthread_mutex_init(a, b)  ((void)(b), InitializeCriticalSection((a)), 0)
+#define ZSTD_pthread_mutex_destroy(a)  DeleteCriticalSection((a))
+#define ZSTD_pthread_mutex_lock(a)     EnterCriticalSection((a))
+#define ZSTD_pthread_mutex_unlock(a)   LeaveCriticalSection((a))
+
+/* condition variable */
+#define ZSTD_pthread_cond_t             CONDITION_VARIABLE
+#define ZSTD_pthread_cond_init(a, b)    ((void)(b), InitializeConditionVariable((a)), 0)
+#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
+#define ZSTD_pthread_cond_wait(a, b)    SleepConditionVariableCS((a), (b), INFINITE)
+#define ZSTD_pthread_cond_signal(a)     WakeConditionVariable((a))
+#define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))
+
+/* ZSTD_pthread_create() and ZSTD_pthread_join() */
+typedef struct {
+    HANDLE handle;
+    void* (*start_routine)(void*);
+    void* arg;
+} ZSTD_pthread_t;
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+                   void* (*start_routine) (void*), void* arg);
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);
+
+/**
+ * add here more wrappers as required
+ */
+
+
+#elif defined(ZSTD_MULTITHREAD)   /* posix assumed ; need a better detection method */
+/* ===   POSIX Systems   === */
+#  include <pthread.h>
+
+#define ZSTD_pthread_mutex_t            pthread_mutex_t
+#define ZSTD_pthread_mutex_init(a, b)   pthread_mutex_init((a), (b))
+#define ZSTD_pthread_mutex_destroy(a)   pthread_mutex_destroy((a))
+#define ZSTD_pthread_mutex_lock(a)      pthread_mutex_lock((a))
+#define ZSTD_pthread_mutex_unlock(a)    pthread_mutex_unlock((a))
+
+#define ZSTD_pthread_cond_t             pthread_cond_t
+#define ZSTD_pthread_cond_init(a, b)    pthread_cond_init((a), (b))
+#define ZSTD_pthread_cond_destroy(a)    pthread_cond_destroy((a))
+#define ZSTD_pthread_cond_wait(a, b)    pthread_cond_wait((a), (b))
+#define ZSTD_pthread_cond_signal(a)     pthread_cond_signal((a))
+#define ZSTD_pthread_cond_broadcast(a)  pthread_cond_broadcast((a))
+
+#define ZSTD_pthread_t                  pthread_t
+#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d))
+#define ZSTD_pthread_join(a, b)         pthread_join((a),(b))
+
+#else  /* ZSTD_MULTITHREAD not defined */
+/* No multithreading support */
+
+typedef int ZSTD_pthread_mutex_t;
+#define ZSTD_pthread_mutex_init(a, b)   ((void)(a), (void)(b), 0)
+#define ZSTD_pthread_mutex_destroy(a)   ((void)(a))
+#define ZSTD_pthread_mutex_lock(a)      ((void)(a))
+#define ZSTD_pthread_mutex_unlock(a)    ((void)(a))
+
+typedef int ZSTD_pthread_cond_t;
+#define ZSTD_pthread_cond_init(a, b)    ((void)(a), (void)(b), 0)
+#define ZSTD_pthread_cond_destroy(a)    ((void)(a))
+#define ZSTD_pthread_cond_wait(a, b)    ((void)(a), (void)(b))
+#define ZSTD_pthread_cond_signal(a)     ((void)(a))
+#define ZSTD_pthread_cond_broadcast(a)  ((void)(a))
+
+/* do not use ZSTD_pthread_t */
+
+#endif /* ZSTD_MULTITHREAD */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* THREADING_H_938743 */
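Since the wrappers mirror the pthreads calling convention on every platform, portable code can be written once against them. A small sketch, assuming `ZSTD_MULTITHREAD` is defined; `greet` is a hypothetical thread function:

```c
#include <stdio.h>
#include "threading.h"

static void* greet(void* arg)
{
    printf("hello from thread %d\n", *(int*)arg);
    return arg;   /* return value is retrievable via ZSTD_pthread_join */
}

int main(void)
{
    ZSTD_pthread_t thread;
    int id = 1;
    void* ret = NULL;
    if (ZSTD_pthread_create(&thread, NULL, greet, &id)) return 1;
    ZSTD_pthread_join(thread, &ret);   /* ret now points at id */
    return 0;
}
```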
diff --git a/vendor/github.com/DataDog/zstd/travis_test_32.sh b/vendor/github.com/DataDog/zstd/travis_test_32.sh
new file mode 100755
index 0000000..d29c86c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/travis_test_32.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Get utilities
+yum -y -q -e 0 install wget tar unzip gcc
+
+# Get Go
+wget -q https://dl.google.com/go/go1.11.1.linux-386.tar.gz
+tar -C /usr/local -xzf go1.11.1.linux-386.tar.gz
+export PATH=$PATH:/usr/local/go/bin
+
+# Get payload
+wget -q https://github.com/DataDog/zstd/files/2246767/mr.zip
+unzip mr.zip
+
+# Build and run tests
+cd zstd
+go build
+PAYLOAD=$(pwd)/mr go test -v
+PAYLOAD=$(pwd)/mr go test -bench .
diff --git a/vendor/github.com/DataDog/zstd/update.txt b/vendor/github.com/DataDog/zstd/update.txt
new file mode 100644
index 0000000..1de939f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/update.txt
@@ -0,0 +1,56 @@
+./lib/common/bitstream.h
+./lib/common/compiler.h
+./lib/compress/zstd_compress_internal.h
+./lib/compress/zstd_fast.h
+./lib/compress/zstd_double_fast.h
+./lib/compress/zstd_lazy.h
+./lib/compress/zstd_ldm.h
+./lib/dictBuilder/cover.c
+./lib/dictBuilder/divsufsort.c
+./lib/dictBuilder/divsufsort.h
+./lib/common/entropy_common.c
+./lib/common/error_private.c
+./lib/common/error_private.h
+./lib/compress/fse_compress.c
+./lib/common/fse_decompress.c
+./lib/common/fse.h
+./lib/compress/huf_compress.c
+./lib/decompress/huf_decompress.c
+./lib/common/huf.h
+./lib/common/mem.h
+./lib/common/pool.c
+./lib/common/pool.h
+./lib/common/threading.c
+./lib/common/threading.h
+./lib/common/xxhash.c
+./lib/common/xxhash.h
+./lib/deprecated/zbuff_common.c
+./lib/deprecated/zbuff_compress.c
+./lib/deprecated/zbuff_decompress.c
+./lib/deprecated/zbuff.h
+./lib/dictBuilder/zdict.c
+./lib/dictBuilder/zdict.h
+./lib/common/zstd_common.c
+./lib/compress/zstd_compress.c
+./lib/decompress/zstd_decompress.c
+./lib/common/zstd_errors.h
+./lib/zstd.h
+./lib/common/zstd_internal.h
+./lib/legacy/zstd_legacy.h
+./lib/compress/zstd_opt.c
+./lib/compress/zstd_opt.h
+./lib/legacy/zstd_v01.c
+./lib/legacy/zstd_v01.h
+./lib/legacy/zstd_v02.c
+./lib/legacy/zstd_v02.h
+./lib/legacy/zstd_v03.c
+./lib/legacy/zstd_v03.h
+./lib/legacy/zstd_v04.c
+./lib/legacy/zstd_v04.h
+./lib/legacy/zstd_v05.c
+./lib/legacy/zstd_v05.h
+./lib/legacy/zstd_v06.c
+./lib/legacy/zstd_v06.h
+./lib/legacy/zstd_v07.c
+./lib/legacy/zstd_v07.h
+
diff --git a/vendor/github.com/DataDog/zstd/xxhash.c b/vendor/github.com/DataDog/zstd/xxhash.c
new file mode 100644
index 0000000..9d9c0e9
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/xxhash.c
@@ -0,0 +1,875 @@
+/*
+*  xxHash - Fast Hash algorithm
+*  Copyright (C) 2012-2016, Yann Collet
+*
+*  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+*
+*  Redistribution and use in source and binary forms, with or without
+*  modification, are permitted provided that the following conditions are
+*  met:
+*
+*  * Redistributions of source code must retain the above copyright
+*  notice, this list of conditions and the following disclaimer.
+*  * Redistributions in binary form must reproduce the above
+*  copyright notice, this list of conditions and the following disclaimer
+*  in the documentation and/or other materials provided with the
+*  distribution.
+*
+*  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+*  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+*  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+*  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+*  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+*  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+*  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+*  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+*  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+*  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+*  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*  You can contact the author at :
+*  - xxHash homepage: http://www.xxhash.com
+*  - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+
+/* *************************************
+*  Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard.
+ *            It can generate buggy code on targets which do not support unaligned memory accesses.
+ *            But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define XXH_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define XXH_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
+ * When this option is enabled, xxHash output for a null input pointer will be the same as for a zero-length input.
+ * By default, this option is disabled. To enable it, uncomment below define :
+ */
+/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
+ * Results are therefore identical for little-endian and big-endian CPUs.
+ * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
+ * Should endian-independence be of no importance for your application, you may set the #define below to 1,
+ * to improve speed for big-endian CPUs.
+ * This option has no impact on little-endian CPUs.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT   /* can be defined externally */
+#  define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash; set to 0 when the input data
+ * is guaranteed to be aligned.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+#  if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+#    define XXH_FORCE_ALIGN_CHECK 0
+#  else
+#    define XXH_FORCE_ALIGN_CHECK 1
+#  endif
+#endif
+
+
+/* *************************************
+*  Includes & Memory related functions
+***************************************/
+/* Modify the local functions below should you wish to use some other memory routines */
+/* for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void  XXH_free  (void* p)  { free(p); }
+/* for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#ifndef XXH_STATIC_LINKING_ONLY
+#  define XXH_STATIC_LINKING_ONLY
+#endif
+#include "xxhash.h"
+
+
+/* *************************************
+*  Compiler Specific Options
+***************************************/
+#if defined (__GNUC__) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#  define INLINE_KEYWORD inline
+#else
+#  define INLINE_KEYWORD
+#endif
+
+#if defined(__GNUC__)
+#  define FORCE_INLINE_ATTR __attribute__((always_inline))
+#elif defined(_MSC_VER)
+#  define FORCE_INLINE_ATTR __forceinline
+#else
+#  define FORCE_INLINE_ATTR
+#endif
+
+#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
+
+
+#ifdef _MSC_VER
+#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* *************************************
+*  Basic Types
+***************************************/
+#ifndef MEM_MODULE
+# define MEM_MODULE
+# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+#   include <stdint.h>
+    typedef uint8_t  BYTE;
+    typedef uint16_t U16;
+    typedef uint32_t U32;
+    typedef  int32_t S32;
+    typedef uint64_t U64;
+#  else
+    typedef unsigned char      BYTE;
+    typedef unsigned short     U16;
+    typedef unsigned int       U32;
+    typedef   signed int       S32;
+    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
+#  endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U32 XXH_read32(const void* memPtr)
+{
+    U32 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+static U64 XXH_read64(const void* memPtr)
+{
+    U64 val;
+    memcpy(&val, memPtr, sizeof(val));
+    return val;
+}
+
+#endif   /* XXH_FORCE_MEMORY_ACCESS */
+
+
+/* ****************************************
+*  Compiler-specific Functions and Macros
+******************************************/
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
+#if defined(_MSC_VER)
+#  define XXH_rotl32(x,r) _rotl(x,r)
+#  define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER)     /* Visual Studio */
+#  define XXH_swap32 _byteswap_ulong
+#  define XXH_swap64 _byteswap_uint64
+#elif GCC_VERSION >= 403
+#  define XXH_swap32 __builtin_bswap32
+#  define XXH_swap64 __builtin_bswap64
+#else
+static U32 XXH_swap32 (U32 x)
+{
+    return  ((x << 24) & 0xff000000 ) |
+            ((x <<  8) & 0x00ff0000 ) |
+            ((x >>  8) & 0x0000ff00 ) |
+            ((x >> 24) & 0x000000ff );
+}
+static U64 XXH_swap64 (U64 x)
+{
+    return  ((x << 56) & 0xff00000000000000ULL) |
+            ((x << 40) & 0x00ff000000000000ULL) |
+            ((x << 24) & 0x0000ff0000000000ULL) |
+            ((x << 8)  & 0x000000ff00000000ULL) |
+            ((x >> 8)  & 0x00000000ff000000ULL) |
+            ((x >> 24) & 0x0000000000ff0000ULL) |
+            ((x >> 40) & 0x000000000000ff00ULL) |
+            ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+
+/* *************************************
+*  Architecture Macros
+***************************************/
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+    static const int g_one = 1;
+#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&g_one))
+#endif
+
+
+/* ***************************
+*  Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
+{
+    if (align==XXH_unaligned)
+        return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+    else
+        return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
+{
+    return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr)
+{
+    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+
+/* *************************************
+*  Macros
+***************************************/
+#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(int)(!!(c)) }; }    /* use only *after* variable declarations */
+
+
+/* *************************************
+*  Constants
+***************************************/
+static const U32 PRIME32_1 = 2654435761U;
+static const U32 PRIME32_2 = 2246822519U;
+static const U32 PRIME32_3 = 3266489917U;
+static const U32 PRIME32_4 =  668265263U;
+static const U32 PRIME32_5 =  374761393U;
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 =  1609587929392839161ULL;
+static const U64 PRIME64_4 =  9650029242287828579ULL;
+static const U64 PRIME64_5 =  2870177450012600261ULL;
+
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* **************************
+*  Utils
+****************************/
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
+{
+    memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+
+/* ***************************
+*  Simple Hash Functions
+*****************************/
+
+static U32 XXH32_round(U32 seed, U32 input)
+{
+    seed += input * PRIME32_2;
+    seed  = XXH_rotl32(seed, 13);
+    seed *= PRIME32_1;
+    return seed;
+}
+
+FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* bEnd = p + len;
+    U32 h32;
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)16;
+    }
+#endif
+
+    if (len>=16) {
+        const BYTE* const limit = bEnd - 16;
+        U32 v1 = seed + PRIME32_1 + PRIME32_2;
+        U32 v2 = seed + PRIME32_2;
+        U32 v3 = seed + 0;
+        U32 v4 = seed - PRIME32_1;
+
+        do {
+            v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
+            v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
+            v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
+            v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
+        } while (p<=limit);
+
+        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+    } else {
+        h32  = seed + PRIME32_5;
+    }
+
+    h32 += (U32) len;
+
+    while (p+4<=bEnd) {
+        h32 += XXH_get32bits(p) * PRIME32_3;
+        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h32 += (*p) * PRIME32_5;
+        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+        p++;
+    }
+
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+
+    return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH32_CREATESTATE_STATIC(state);
+    XXH32_reset(state, seed);
+    XXH32_update(state, input, len);
+    return XXH32_digest(state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+static U64 XXH64_round(U64 acc, U64 input)
+{
+    acc += input * PRIME64_2;
+    acc  = XXH_rotl64(acc, 31);
+    acc *= PRIME64_1;
+    return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val)
+{
+    val  = XXH64_round(0, val);
+    acc ^= val;
+    acc  = acc * PRIME64_1 + PRIME64_4;
+    return acc;
+}
+
+FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+    U64 h64;
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (p==NULL) {
+        len=0;
+        bEnd=p=(const BYTE*)(size_t)32;
+    }
+#endif
+
+    if (len>=32) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = seed + PRIME64_1 + PRIME64_2;
+        U64 v2 = seed + PRIME64_2;
+        U64 v3 = seed + 0;
+        U64 v4 = seed - PRIME64_1;
+
+        do {
+            v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
+            v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
+            v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
+            v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
+        } while (p<=limit);
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+
+    } else {
+        h64  = seed + PRIME64_5;
+    }
+
+    h64 += (U64) len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_get64bits(p));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
+        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
+{
+#if 0
+    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+    XXH64_CREATESTATE_STATIC(state);
+    XXH64_reset(state, seed);
+    XXH64_update(state, input, len);
+    return XXH64_digest(state);
+#else
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if (XXH_FORCE_ALIGN_CHECK) {
+        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
+            if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+                return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
+            else
+                return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }   }
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
+    else
+        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
+
+
+/* **************************************************
+*  Advanced Hash Functions
+****************************************************/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+    return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+    XXH_free(statePtr);
+    return XXH_OK;
+}
+
+
+/*** Hash feed ***/
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
+{
+    XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME32_1 + PRIME32_2;
+    state.v2 = seed + PRIME32_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME32_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
+{
+    XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+    memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
+    state.v1 = seed + PRIME64_1 + PRIME64_2;
+    state.v2 = seed + PRIME64_2;
+    state.v3 = seed + 0;
+    state.v4 = seed - PRIME64_1;
+    memcpy(statePtr, &state, sizeof(state));
+    return XXH_OK;
+}
+
+
+FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len_32 += (unsigned)len;
+    state->large_len |= (len>=16) | (state->total_len_32>=16);
+
+    if (state->memsize + len < 16)  {   /* fill in tmp buffer */
+        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
+        state->memsize += (unsigned)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* some data left from previous update */
+        XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
+        {   const U32* p32 = state->mem32;
+            state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
+            state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
+            state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
+            state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
+        }
+        p += 16-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p <= bEnd-16) {
+        const BYTE* const limit = bEnd - 16;
+        U32 v1 = state->v1;
+        U32 v2 = state->v2;
+        U32 v3 = state->v3;
+        U32 v4 = state->v4;
+
+        do {
+            v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
+            v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
+            v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
+            v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem32;
+    const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
+    U32 h32;
+
+    if (state->large_len) {
+        h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+    } else {
+        h32 = state->v3 /* == seed */ + PRIME32_5;
+    }
+
+    h32 += state->total_len_32;
+
+    while (p+4<=bEnd) {
+        h32 += XXH_readLE32(p, endian) * PRIME32_3;
+        h32  = XXH_rotl32(h32, 17) * PRIME32_4;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h32 += (*p) * PRIME32_5;
+        h32  = XXH_rotl32(h32, 11) * PRIME32_1;
+        p++;
+    }
+
+    h32 ^= h32 >> 15;
+    h32 *= PRIME32_2;
+    h32 ^= h32 >> 13;
+    h32 *= PRIME32_3;
+    h32 ^= h32 >> 16;
+
+    return h32;
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH32_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+
+/* **** XXH64 **** */
+
+FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
+{
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
+    if (input==NULL) return XXH_ERROR;
+#endif
+
+    state->total_len += len;
+
+    if (state->memsize + len < 32) {  /* fill in tmp buffer */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+        state->memsize += (U32)len;
+        return XXH_OK;
+    }
+
+    if (state->memsize) {   /* tmp buffer is full */
+        XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
+        state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
+        state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
+        state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
+        state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
+        p += 32-state->memsize;
+        state->memsize = 0;
+    }
+
+    if (p+32 <= bEnd) {
+        const BYTE* const limit = bEnd - 32;
+        U64 v1 = state->v1;
+        U64 v2 = state->v2;
+        U64 v3 = state->v3;
+        U64 v4 = state->v4;
+
+        do {
+            v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
+            v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
+            v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
+            v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
+        } while (p<=limit);
+
+        state->v1 = v1;
+        state->v2 = v2;
+        state->v3 = v3;
+        state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+        XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+        state->memsize = (unsigned)(bEnd-p);
+    }
+
+    return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+    else
+        return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+
+FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
+{
+    const BYTE * p = (const BYTE*)state->mem64;
+    const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
+    U64 h64;
+
+    if (state->total_len >= 32) {
+        U64 const v1 = state->v1;
+        U64 const v2 = state->v2;
+        U64 const v3 = state->v3;
+        U64 const v4 = state->v4;
+
+        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+        h64 = XXH64_mergeRound(h64, v1);
+        h64 = XXH64_mergeRound(h64, v2);
+        h64 = XXH64_mergeRound(h64, v3);
+        h64 = XXH64_mergeRound(h64, v4);
+    } else {
+        h64  = state->v3 + PRIME64_5;
+    }
+
+    h64 += (U64) state->total_len;
+
+    while (p+8<=bEnd) {
+        U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
+        h64 ^= k1;
+        h64  = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+        p+=8;
+    }
+
+    if (p+4<=bEnd) {
+        h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
+        h64  = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+        p+=4;
+    }
+
+    while (p<bEnd) {
+        h64 ^= (*p) * PRIME64_5;
+        h64  = XXH_rotl64(h64, 11) * PRIME64_1;
+        p++;
+    }
+
+    h64 ^= h64 >> 33;
+    h64 *= PRIME64_2;
+    h64 ^= h64 >> 29;
+    h64 *= PRIME64_3;
+    h64 ^= h64 >> 32;
+
+    return h64;
+}
+
+
+XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
+{
+    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_digest_endian(state_in, XXH_littleEndian);
+    else
+        return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/* **************************
+*  Canonical representation
+****************************/
+
+/*! Default XXH result types are basic unsigned 32 and 64 bits.
+*   The canonical representation follows human-readable write convention, aka big-endian (large digits first).
+*   These functions allow transformation of hash result into and from its canonical format.
+*   This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+    memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+    return XXH_readBE32(src);
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+    return XXH_readBE64(src);
+}
diff --git a/vendor/github.com/DataDog/zstd/xxhash.h b/vendor/github.com/DataDog/zstd/xxhash.h
new file mode 100644
index 0000000..9bad1f5
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/xxhash.h
@@ -0,0 +1,305 @@
+/*
+   xxHash - Extremely Fast Hash algorithm
+   Header File
+   Copyright (C) 2012-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name            Speed       Q.Score   Author
+xxHash          5.4 GB/s     10
+CrapWow         3.2 GB/s      2       Andrew
+MurmurHash 3a   2.7 GB/s     10       Austin Appleby
+SpookyHash      2.0 GB/s     10       Bob Jenkins
+SBox            1.4 GB/s      9       Bret Mulvey
+Lookup3         1.2 GB/s      9       Bob Jenkins
+SuperFastHash   1.2 GB/s      1       Paul Hsieh
+CityHash64      1.05 GB/s    10       Pike & Alakuijala
+FNV             0.55 GB/s     5       Fowler, Noll, Vo
+CRC32           0.43 GB/s     9
+MD5-32          0.33 GB/s    10       Ronald L. Rivest
+SHA1-32         0.28 GB/s    10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name     Speed on 64 bits    Speed on 32 bits
+XXH64       13.8 GB/s            1.9 GB/s
+XXH32        6.8 GB/s            6.0 GB/s
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+
+/* ****************************
+*  Definitions
+******************************/
+#include <stddef.h>   /* size_t */
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+/* ****************************
+*  API modifier
+******************************/
+/** XXH_PRIVATE_API
+*   This is useful if you want to include xxhash functions in `static` mode
+*   in order to inline them, and remove their symbol from the public list.
+*   Methodology :
+*     #define XXH_PRIVATE_API
+*     #include "xxhash.h"
+*   `xxhash.c` is automatically included.
+*   It's not useful to compile and link it as a separate module anymore.
+*/
+#ifdef XXH_PRIVATE_API
+#  ifndef XXH_STATIC_LINKING_ONLY
+#    define XXH_STATIC_LINKING_ONLY
+#  endif
+#  if defined(__GNUC__)
+#    define XXH_PUBLIC_API static __inline __attribute__((unused))
+#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#    define XXH_PUBLIC_API static inline
+#  elif defined(_MSC_VER)
+#    define XXH_PUBLIC_API static __inline
+#  else
+#    define XXH_PUBLIC_API static   /* this version may generate warnings for unused static functions; disable the relevant warning */
+#  endif
+#else
+#  define XXH_PUBLIC_API   /* do nothing */
+#endif /* XXH_PRIVATE_API */
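A sketch of the `static` mode described in the comment above; per that comment, the define only needs to precede the include, and xxhash.c is then pulled in automatically (`hashBlob` is a hypothetical caller):

```c
#define XXH_PRIVATE_API
#include "xxhash.h"

unsigned hashBlob(const void* data, size_t size)
{
    return XXH32(data, size, 0);   /* inlined; no public XXH32 symbol exported */
}
```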
+
+/*!XXH_NAMESPACE, aka Namespace Emulation :
+
+If you want to include _and expose_ xxHash functions from within your own library,
+but also want to avoid symbol collisions with another library which also includes xxHash,
+
+you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library
+with the value of XXH_NAMESPACE (so avoid leaving it empty, and avoid numeric values).
+
+Note that no change is required within the calling program as long as it includes `xxhash.h` :
+regular symbol names will be automatically translated by this header.
+*/
+#ifdef XXH_NAMESPACE
+#  define XXH_CAT(A,B) A##B
+#  define XXH_NAME2(A,B) XXH_CAT(A,B)
+#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
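For example, building with a hypothetical prefix `MYLIB_` renames every public symbol at link time while calling code keeps using the regular names:

```c
/* equivalent to compiling with -DXXH_NAMESPACE=MYLIB_ */
#define XXH_NAMESPACE MYLIB_
#include "xxhash.h"

unsigned hashIt(const void* p, size_t n)
{
    return XXH32(p, n, 0);   /* resolves to MYLIB_XXH32 at link time */
}
```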
+
+
+/* *************************************
+*  Version
+***************************************/
+#define XXH_VERSION_MAJOR    0
+#define XXH_VERSION_MINOR    6
+#define XXH_VERSION_RELEASE  2
+#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+*  Simple Hash Functions
+******************************/
+typedef unsigned int       XXH32_hash_t;
+typedef unsigned long long XXH64_hash_t;
+
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
+
+/*!
+XXH32() :
+    Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
+    The memory between input & input+length must be valid (allocated and read-accessible).
+    "seed" can be used to alter the result predictably.
+    Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+XXH64() :
+    Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
+    "seed" can be used to alter the result predictably.
+    This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+*/
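+
+/* Minimal single-call sketch (illustrative; `data` stands for any
+   read-accessible buffer) :
+
+       const char data[] = "sample";
+       XXH32_hash_t const h32 = XXH32(data, sizeof(data)-1, 0);
+       XXH64_hash_t const h64 = XXH64(data, sizeof(data)-1, 0);
+
+   The same input and seed always produce the same hash value. */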
+
+
+/* ****************************
+*  Streaming Hash Functions
+******************************/
+typedef struct XXH32_state_s XXH32_state_t;   /* incomplete type */
+typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
+
+/*! State allocation, compatible with dynamic libraries */
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
+
+
+/* hash streaming */
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
+
+/*
+These functions generate the xxHash of an input provided in multiple segments.
+Note that, for small inputs, they are slower than the single-call functions, due to state management.
+For small inputs, prefer `XXH32()` and `XXH64()` .
+
+XXH state must first be allocated, using XXH*_createState() .
+
+Start a new hash by initializing the state with a seed, using XXH*_reset().
+
+Then, feed the hash state by calling XXH*_update() as many times as necessary.
+The input must be allocated and read-accessible.
+The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+
+Finally, a hash value can be produced at any time, by using XXH*_digest().
+This function returns the 32-bit or 64-bit hash as an int or long long.
+
+It's still possible to continue inserting input into the hash state after a digest,
+and generate new hashes later on, by calling XXH*_digest() again.
+
+When done, free the XXH state space if it was allocated dynamically.
+*/
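+
+/* Streaming sketch (illustrative; error checks shortened; `chunk1`/`chunk2`
+   stand for any read-accessible segments) :
+
+       XXH64_state_t* const state = XXH64_createState();
+       XXH64_reset(state, 0);                      // start a new hash, seed 0
+       XXH64_update(state, chunk1, chunk1Size);
+       XXH64_update(state, chunk2, chunk2Size);
+       XXH64_hash_t const h = XXH64_digest(state); // same value as one-shot XXH64()
+       XXH64_freeState(state);
+
+   In real code, the XXH_errorcode returned by reset/update should be checked. */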
+
+
+/* **************************
+*  Utils
+****************************/
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* ! C99 */
+#  define restrict   /* disable restrict */
+#endif
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state);
+
+
+/* **************************
+*  Canonical representation
+****************************/
+/* The default result type for XXH functions is a primitive unsigned 32-bit or 64-bit integer.
+*  The canonical representation uses the human-readable write convention, aka big-endian (large digits first).
+*  These functions allow transformation of a hash result into and from its canonical format.
+*  This way, hash values can be written into a file / memory, and remain comparable across different systems and programs.
+*/
+typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
+typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
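+
+/* Round-trip sketch (illustrative; `data`/`size` stand for any read-accessible
+   buffer) : store a hash in canonical big-endian form, then read it back
+   identically on any platform :
+
+       XXH64_canonical_t c;
+       XXH64_canonicalFromHash(&c, XXH64(data, size, 0));
+       // the 8 bytes of c.digest can be written to a file as-is
+       XXH64_hash_t const h = XXH64_hashFromCanonical(&c);
+*/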
+
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+/* ================================================================================================
+   This section contains definitions which are not guaranteed to remain stable.
+   They may change in future versions, becoming incompatible with a different version of the library.
+   They shall only be used with static linking.
+   Never use these definitions in association with dynamic linking!
+=================================================================================================== */
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345)
+#define XXH_STATIC_H_3543687687345
+
+/* These definitions are only meant to allow allocation of XXH state
+   statically, on stack, or in a struct for example.
+   Do not use members directly. */
+
+   struct XXH32_state_s {
+       unsigned total_len_32;
+       unsigned large_len;
+       unsigned v1;
+       unsigned v2;
+       unsigned v3;
+       unsigned v4;
+       unsigned mem32[4];   /* buffer defined as U32 for alignment */
+       unsigned memsize;
+       unsigned reserved;   /* never read nor written; will be removed in a future version */
+   };   /* typedef'd to XXH32_state_t */
+
+   struct XXH64_state_s {
+       unsigned long long total_len;
+       unsigned long long v1;
+       unsigned long long v2;
+       unsigned long long v3;
+       unsigned long long v4;
+       unsigned long long mem64[4];   /* buffer defined as U64 for alignment */
+       unsigned memsize;
+       unsigned reserved[2];          /* never read nor written; will be removed in a future version */
+   };   /* typedef'd to XXH64_state_t */
+
+
+#  ifdef XXH_PRIVATE_API
+#    include "xxhash.c"   /* include xxhash functions as `static`, for inlining */
+#  endif
+
+#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/zbuff.h b/vendor/github.com/DataDog/zstd/zbuff.h
new file mode 100644
index 0000000..a93115d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* ***************************************************************
+*  NOTES/WARNINGS
+******************************************************************/
+/* The streaming API defined here is deprecated.
+ * Consider migrating to the ZSTD_compressStream() API in `zstd.h`.
+ * See 'lib/README.md'.
+ *****************************************************************/
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef ZSTD_BUFFERED_H_23987
+#define ZSTD_BUFFERED_H_23987
+
+/* *************************************
+*  Dependencies
+***************************************/
+#include <stddef.h>      /* size_t */
+#include "zstd.h"        /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */
+
+
+/* ***************************************************************
+*  Compiler specifics
+*****************************************************************/
+/* Deprecation warnings */
+/* Should these warnings be a problem,
+   it is generally possible to disable them,
+   typically with -Wno-deprecated-declarations for gcc
+   or _CRT_SECURE_NO_WARNINGS in Visual Studio.
+   Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS */
+#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS
+#  define ZBUFF_DEPRECATED(message) ZSTDLIB_API  /* disable deprecation warnings */
+#else
+#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+#    define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API
+#  elif (defined(__GNUC__) && (__GNUC__ >= 5)) || defined(__clang__)
+#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message)))
+#  elif defined(__GNUC__) && (__GNUC__ >= 3)
+#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated))
+#  elif defined(_MSC_VER)
+#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message))
+#  else
+#    pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler")
+#    define ZBUFF_DEPRECATED(message) ZSTDLIB_API
+#  endif
+#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */
+
+
+/* *************************************
+*  Streaming functions
+***************************************/
+/* This is the easier "buffered" streaming API,
+*  using an internal buffer to lift all restrictions on user-provided buffers,
+*  which can be any size, in any place, for both input and output.
+*  ZBUFF and ZSTD are 100% interoperable :
+*  frames created by one can be decoded by the other */
+
+typedef ZSTD_CStream ZBUFF_CCtx;
+ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void);
+ZBUFF_DEPRECATED("use ZSTD_freeCStream")   size_t      ZBUFF_freeCCtx(ZBUFF_CCtx* cctx);
+
+ZBUFF_DEPRECATED("use ZSTD_initCStream")           size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel);
+ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+
+ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr);
+ZBUFF_DEPRECATED("use ZSTD_flushStream")    size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
+ZBUFF_DEPRECATED("use ZSTD_endStream")      size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr);
+
+/*-*************************************************
+*  Streaming compression - howto
+*
+*  A ZBUFF_CCtx object is required to track streaming operation.
+*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
+*  ZBUFF_CCtx objects can be reused multiple times.
+*
+*  Start by initializing the ZBUFF_CCtx.
+*  Use ZBUFF_compressInit() to start a new compression operation.
+*  Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary.
+*
+*  Use ZBUFF_compressContinue() repeatedly to consume the input stream.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function reports how many bytes were read or written within *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining data again.
+*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters, or change `dst`.
+*  @return : a hint for the preferred number of bytes to use as input for the next function call (it's just a hint, to improve latency),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  At any moment, it's possible to flush whatever data remains within the buffer, using ZBUFF_compressFlush().
+*  The number of bytes written into `dst` will be reported in *dstCapacityPtr.
+*  Note that the function cannot output more than *dstCapacityPtr;
+*  therefore, some content might still be left in the internal buffer if *dstCapacityPtr is too small.
+*  @return : number of bytes still present in the internal buffer (0 if it's empty),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  ZBUFF_compressEnd() instructs to finish a frame.
+*  It will perform a flush and write the frame epilogue.
+*  The epilogue is required for decoders to consider a frame completed.
+*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
+*  In that case, call ZBUFF_compressFlush() again to complete the flush.
+*  @return : number of bytes still present in the internal buffer (0 if it's empty),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize()
+*  input : ZBUFF_recommendedCInSize == 128 KB; the block size is the internal unit, so using this value reduces intermediate stages (better latency).
+*  output : ZBUFF_recommendedCOutSize == ZSTD_compressBound(128 KB) + 3 + 3; this ensures it's always possible to write/flush/end a full block, skipping some buffering.
+*  Using both ensures that input will be entirely consumed and output will always contain the result, reducing intermediate buffering.
+* **************************************************/
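+
+/* Compression loop sketch for this deprecated API (illustrative only;
+   the `in`/`out` buffers, their sizes, and the `moreInput` condition are
+   assumptions; new code should use ZSTD_compressStream() instead) :
+
+       ZBUFF_CCtx* const zbc = ZBUFF_createCCtx();
+       ZBUFF_compressInit(zbc, 3);                      // compression level 3
+       while (moreInput) {
+           size_t srcSize = inSize, dstSize = outCapacity;
+           size_t const hint = ZBUFF_compressContinue(zbc, out, &dstSize, in, &srcSize);
+           if (ZBUFF_isError(hint)) break;              // handle error
+           // dstSize bytes were written into out; srcSize bytes were consumed
+       }
+       {   size_t dstSize = outCapacity;
+           ZBUFF_compressEnd(zbc, out, &dstSize);       // flush + frame epilogue
+       }
+       ZBUFF_freeCCtx(zbc);
+*/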
+
+
+typedef ZSTD_DStream ZBUFF_DCtx;
+ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void);
+ZBUFF_DEPRECATED("use ZSTD_freeDStream")   size_t      ZBUFF_freeDCtx(ZBUFF_DCtx* dctx);
+
+ZBUFF_DEPRECATED("use ZSTD_initDStream")           size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx);
+ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize);
+
+ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx,
+                                            void* dst, size_t* dstCapacityPtr,
+                                      const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+*  Streaming decompression - howto
+*
+*  A ZBUFF_DCtx object is required to track streaming operations.
+*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
+*  Use ZBUFF_decompressInit() to start a new decompression operation,
+*   or ZBUFF_decompressInitDictionary() if decompression requires a dictionary.
+*  Note that ZBUFF_DCtx objects can be re-initialized multiple times.
+*
+*  Use ZBUFF_decompressContinue() repeatedly to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function reports how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present the remaining input again.
+*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
+*  @return : 0 when a frame is completely decoded and fully flushed,
+*            1 when there is still some data left within the internal buffer to flush,
+*            >1 when more data is expected, with the value being a suggested next input size (it's just a hint, which helps latency),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize()
+*  output : ZBUFF_recommendedDOutSize == 128 KB; the block size is the internal unit, so this ensures it's always possible to write a full block when decoded.
+*  input  : ZBUFF_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
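+
+/* Decompression loop sketch for this deprecated API (illustrative only;
+   the `in`/`out` buffers and their sizes are assumptions; new code should use
+   ZSTD_decompressStream() instead) :
+
+       ZBUFF_DCtx* const zbd = ZBUFF_createDCtx();
+       ZBUFF_decompressInit(zbd);
+       for (;;) {
+           size_t srcSize = inSize, dstSize = outCapacity;
+           size_t const r = ZBUFF_decompressContinue(zbd, out, &dstSize, in, &srcSize);
+           if (ZBUFF_isError(r)) break;   // handle error
+           if (r == 0) break;             // frame fully decoded and flushed
+           // otherwise : drain `out`, refill `in` as indicated, and continue
+       }
+       ZBUFF_freeDCtx(zbd);
+*/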
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+ZBUFF_DEPRECATED("use ZSTD_isError")      unsigned ZBUFF_isError(size_t errorCode);
+ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode);
+
+/** The functions below provide recommended buffer sizes for compression or decompression operations.
+*   These sizes are just hints; using them tends to offer better latency. */
+ZBUFF_DEPRECATED("use ZSTD_CStreamInSize")  size_t ZBUFF_recommendedCInSize(void);
+ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void);
+ZBUFF_DEPRECATED("use ZSTD_DStreamInSize")  size_t ZBUFF_recommendedDInSize(void);
+ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void);
+
+#endif  /* ZSTD_BUFFERED_H_23987 */
+
+
+#ifdef ZBUFF_STATIC_LINKING_ONLY
+#ifndef ZBUFF_STATIC_H_30298098432
+#define ZBUFF_STATIC_H_30298098432
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used in association with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+/*--- Dependency ---*/
+#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters, ZSTD_customMem */
+#include "zstd.h"
+
+
+/*--- Custom memory allocator ---*/
+/*! ZBUFF_createCCtx_advanced() :
+ *  Create a ZBUFF compression context using external alloc and free functions */
+ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem);
+
+/*! ZBUFF_createDCtx_advanced() :
+ *  Create a ZBUFF decompression context using external alloc and free functions */
+ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem);
+
+
+/*--- Advanced Streaming Initialization ---*/
+ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
+                                               const void* dict, size_t dictSize,
+                                               ZSTD_parameters params, unsigned long long pledgedSrcSize);
+
+
+#endif    /* ZBUFF_STATIC_H_30298098432 */
+#endif    /* ZBUFF_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/zbuff_common.c b/vendor/github.com/DataDog/zstd/zbuff_common.c
new file mode 100644
index 0000000..661b9b0
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff_common.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "error_private.h"
+#include "zbuff.h"
+
+/*-****************************************
+*  ZBUFF Error Management  (deprecated)
+******************************************/
+
+/*! ZBUFF_isError() :
+*   tells if a return value is an error code */
+unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); }
+/*! ZBUFF_getErrorName() :
+*   provides error code string from function result (useful for debugging) */
+const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
diff --git a/vendor/github.com/DataDog/zstd/zbuff_compress.c b/vendor/github.com/DataDog/zstd/zbuff_compress.c
new file mode 100644
index 0000000..f39c60d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff_compress.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/* *************************************
+*  Dependencies
+***************************************/
+#define ZBUFF_STATIC_LINKING_ONLY
+#include "zbuff.h"
+
+
+/*-***********************************************************
+*  Streaming compression
+*
+*  A ZBUFF_CCtx object is required to track streaming operation.
+*  Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources.
+*  Use ZBUFF_compressInit() to start a new compression operation.
+*  ZBUFF_CCtx objects can be reused multiple times.
+*
+*  Use ZBUFF_compressContinue() repeatedly to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function reports how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
+*  The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change dst.
+*  @return : a hint for the preferred number of bytes to use as input for the next function call (it's only a hint, to improve latency),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer.
+*  Note that it will not output more than *dstCapacityPtr.
+*  Therefore, some content might still be left in its internal buffer if the dst buffer is too small.
+*  @return : number of bytes still present in the internal buffer (0 if it's empty),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  ZBUFF_compressEnd() instructs to finish a frame.
+*  It will perform a flush and write the frame epilogue.
+*  Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small.
+*  @return : number of bytes still present in the internal buffer (0 if it's empty),
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory)
+*  input : ZSTD_BLOCKSIZE_MAX (128 KB), the internal unit size; using this value improves latency.
+*  output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed.
+* ***********************************************************/
+
+ZBUFF_CCtx* ZBUFF_createCCtx(void)
+{
+    return ZSTD_createCStream();
+}
+
+ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem)
+{
+    return ZSTD_createCStream_advanced(customMem);
+}
+
+size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc)
+{
+    return ZSTD_freeCStream(zbc);
+}
+
+
+/* ======   Initialization   ====== */
+
+size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc,
+                                   const void* dict, size_t dictSize,
+                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* preserve "0 == unknown" behavior */
+    return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize);
+}
+
+
+size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel)
+{
+    return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel);
+}
+
+size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel)
+{
+    return ZSTD_initCStream(zbc, compressionLevel);
+}
+
+/* ======   Compression   ====== */
+
+
+size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc,
+                              void* dst, size_t* dstCapacityPtr,
+                        const void* src, size_t* srcSizePtr)
+{
+    size_t result;
+    ZSTD_outBuffer outBuff;
+    ZSTD_inBuffer inBuff;
+    outBuff.dst = dst;
+    outBuff.pos = 0;
+    outBuff.size = *dstCapacityPtr;
+    inBuff.src = src;
+    inBuff.pos = 0;
+    inBuff.size = *srcSizePtr;
+    result = ZSTD_compressStream(zbc, &outBuff, &inBuff);
+    *dstCapacityPtr = outBuff.pos;
+    *srcSizePtr = inBuff.pos;
+    return result;
+}
+
+
+
+/* ======   Finalize   ====== */
+
+size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
+{
+    size_t result;
+    ZSTD_outBuffer outBuff;
+    outBuff.dst = dst;
+    outBuff.pos = 0;
+    outBuff.size = *dstCapacityPtr;
+    result = ZSTD_flushStream(zbc, &outBuff);
+    *dstCapacityPtr = outBuff.pos;
+    return result;
+}
+
+
+size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr)
+{
+    size_t result;
+    ZSTD_outBuffer outBuff;
+    outBuff.dst = dst;
+    outBuff.pos = 0;
+    outBuff.size = *dstCapacityPtr;
+    result = ZSTD_endStream(zbc, &outBuff);
+    *dstCapacityPtr = outBuff.pos;
+    return result;
+}
+
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+size_t ZBUFF_recommendedCInSize(void)  { return ZSTD_CStreamInSize(); }
+size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); }
diff --git a/vendor/github.com/DataDog/zstd/zbuff_decompress.c b/vendor/github.com/DataDog/zstd/zbuff_decompress.c
new file mode 100644
index 0000000..923c22b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zbuff_decompress.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/* *************************************
+*  Dependencies
+***************************************/
+#define ZBUFF_STATIC_LINKING_ONLY
+#include "zbuff.h"
+
+
+ZBUFF_DCtx* ZBUFF_createDCtx(void)
+{
+    return ZSTD_createDStream();
+}
+
+ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem)
+{
+    return ZSTD_createDStream_advanced(customMem);
+}
+
+size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd)
+{
+    return ZSTD_freeDStream(zbd);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize)
+{
+    return ZSTD_initDStream_usingDict(zbd, dict, dictSize);
+}
+
+size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd)
+{
+    return ZSTD_initDStream(zbd);
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd,
+                                void* dst, size_t* dstCapacityPtr,
+                          const void* src, size_t* srcSizePtr)
+{
+    ZSTD_outBuffer outBuff;
+    ZSTD_inBuffer inBuff;
+    size_t result;
+    outBuff.dst  = dst;
+    outBuff.pos  = 0;
+    outBuff.size = *dstCapacityPtr;
+    inBuff.src  = src;
+    inBuff.pos  = 0;
+    inBuff.size = *srcSizePtr;
+    result = ZSTD_decompressStream(zbd, &outBuff, &inBuff);
+    *dstCapacityPtr = outBuff.pos;
+    *srcSizePtr = inBuff.pos;
+    return result;
+}
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+size_t ZBUFF_recommendedDInSize(void)  { return ZSTD_DStreamInSize(); }
+size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); }
diff --git a/vendor/github.com/DataDog/zstd/zdict.c b/vendor/github.com/DataDog/zstd/zdict.c
new file mode 100644
index 0000000..7d24e49
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zdict.c
@@ -0,0 +1,1108 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*-**************************************
+*  Tuning parameters
+****************************************/
+#define MINRATIO 4   /* minimum number of occurrences for a pattern to be selected for the dictionary */
+#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
+#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
+
+
+/*-**************************************
+*  Compiler Options
+****************************************/
+/* Unix Large Files support (>4GB) */
+#define _FILE_OFFSET_BITS 64
+#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */
+#  define _LARGEFILE_SOURCE
+#elif ! defined(__LP64__)                        /* No point defining Large file for 64 bit */
+#  define _LARGEFILE64_SOURCE
+#endif
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdlib.h>        /* malloc, free */
+#include <string.h>        /* memset */
+#include <stdio.h>         /* fprintf, fopen, ftello64 */
+#include <time.h>          /* clock */
+
+#include "mem.h"           /* read */
+#include "fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"           /* HUF_buildCTable, HUF_writeCTable */
+#include "zstd_internal.h" /* includes zstd.h */
+#include "xxhash.h"        /* XXH64 */
+#include "divsufsort.h"
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#  define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define DICTLISTSIZE_DEFAULT 10000
+
+#define NOISELENGTH 32
+
+static const int g_compressionLevel_default = 3;
+static const U32 g_selectivity_default = 9;
+
+
+/*-*************************************
+*  Console display
+***************************************/
+#define DISPLAY(...)         { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
+#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+
+static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
+
+static void ZDICT_printHex(const void* ptr, size_t length)
+{
+    const BYTE* const b = (const BYTE*)ptr;
+    size_t u;
+    for (u=0; u<length; u++) {
+        BYTE c = b[u];
+        if (c<32 || c>126) c = '.';   /* non-printable char */
+        DISPLAY("%c", c);
+    }
+}
+
+
+/*-********************************************************
+*  Helper functions
+**********************************************************/
+unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
+{
+    if (dictSize < 8) return 0;
+    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
+    return MEM_readLE32((const char*)dictBuffer + 4);
+}
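+
+/* note : the layout read above follows the zstd dictionary format :
+   bytes 0-3 hold ZSTD_MAGIC_DICTIONARY and bytes 4-7 hold the dictID,
+   both little-endian; anything shorter than 8 bytes cannot carry an ID */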
+
+
+/*-********************************************************
+*  Dictionary training functions
+**********************************************************/
+static unsigned ZDICT_NbCommonBytes (size_t val)
+{
+    if (MEM_isLittleEndian()) {
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanForward64( &r, (U64)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctzll((U64)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r=0;
+            _BitScanForward( &r, (U32)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctz((U32)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#       endif
+        }
+    } else {  /* Big Endian CPU */
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanReverse64( &r, val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clzll(val) >> 3);
+#       else
+            unsigned r;
+            const unsigned n32 = sizeof(size_t)*4;   /* computed this way because the compiler complains in 32-bit mode otherwise */
+            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r = 0;
+            _BitScanReverse( &r, (unsigned long)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clz((U32)val) >> 3);
+#       else
+            unsigned r;
+            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+    }   }
+}
+
+
+/*! ZDICT_count() :
+    Count the number of common bytes between 2 pointers.
+    Note : this function presumes the end of the buffer is followed by a noisy guard band.
+*/
+static size_t ZDICT_count(const void* pIn, const void* pMatch)
+{
+    const char* const pStart = (const char*)pIn;
+    for (;;) {
+        size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+        if (!diff) {
+            pIn = (const char*)pIn+sizeof(size_t);
+            pMatch = (const char*)pMatch+sizeof(size_t);
+            continue;
+        }
+        pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
+        return (size_t)((const char*)pIn - pStart);
+    }
+}
+
+
+typedef struct {
+    U32 pos;
+    U32 length;
+    U32 savings;
+} dictItem;
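+
+/* convention : dictList[0] acts as a header slot; its `pos` field stores
+   the number of elements in the table, so real entries start at index 1
+   (see ZDICT_dictSize() and ZDICT_removeDictItem() below) */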
+
+static void ZDICT_initDictItem(dictItem* d)
+{
+    d->pos = 1;
+    d->length = 0;
+    d->savings = (U32)(-1);
+}
+
+
+#define LLIMIT 64          /* heuristic determined experimentally */
+#define MINMATCHLENGTH 7   /* heuristic determined experimentally */
+static dictItem ZDICT_analyzePos(
+                       BYTE* doneMarks,
+                       const int* suffix, U32 start,
+                       const void* buffer, U32 minRatio, U32 notificationLevel)
+{
+    U32 lengthList[LLIMIT] = {0};
+    U32 cumulLength[LLIMIT] = {0};
+    U32 savings[LLIMIT] = {0};
+    const BYTE* b = (const BYTE*)buffer;
+    size_t maxLength = LLIMIT;
+    size_t pos = suffix[start];
+    U32 end = start;
+    dictItem solution;
+
+    /* init */
+    memset(&solution, 0, sizeof(solution));
+    doneMarks[pos] = 1;
+
+    /* trivial repetition cases */
+    if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
+       ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
+       ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
+        /* skip and mark segment */
+        U16 const pattern16 = MEM_read16(b+pos+4);
+        U32 u, patternEnd = 6;
+        while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
+        if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
+        for (u=1; u<patternEnd; u++)
+            doneMarks[pos+u] = 1;
+        return solution;
+    }
+
+    /* look forward */
+    {   size_t length;
+        do {
+            end++;
+            length = ZDICT_count(b + pos, b + suffix[end]);
+        } while (length >= MINMATCHLENGTH);
+    }
+
+    /* look backward */
+    {   size_t length;
+        do {
+            length = ZDICT_count(b + pos, b + *(suffix+start-1));
+            if (length >=MINMATCHLENGTH) start--;
+        } while(length >= MINMATCHLENGTH);
+    }
+
+    /* exit if a minimum number of repetitions was not found */
+    if (end-start < minRatio) {
+        U32 idx;
+        for(idx=start; idx<end; idx++)
+            doneMarks[suffix[idx]] = 1;
+        return solution;
+    }
+
+    {   int i;
+        U32 searchLength;
+        U32 refinedStart = start;
+        U32 refinedEnd = end;
+
+        DISPLAYLEVEL(4, "\n");
+        DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u  ", (U32)(end-start), MINMATCHLENGTH, (U32)pos);
+        DISPLAYLEVEL(4, "\n");
+
+        for (searchLength = MINMATCHLENGTH ; ; searchLength++) {
+            BYTE currentChar = 0;
+            U32 currentCount = 0;
+            U32 currentID = refinedStart;
+            U32 id;
+            U32 selectedCount = 0;
+            U32 selectedID = currentID;
+            for (id =refinedStart; id < refinedEnd; id++) {
+                if (b[suffix[id] + searchLength] != currentChar) {
+                    if (currentCount > selectedCount) {
+                        selectedCount = currentCount;
+                        selectedID = currentID;
+                    }
+                    currentID = id;
+                    currentChar = b[ suffix[id] + searchLength];
+                    currentCount = 0;
+                }
+                currentCount ++;
+            }
+            if (currentCount > selectedCount) {  /* for last */
+                selectedCount = currentCount;
+                selectedID = currentID;
+            }
+
+            if (selectedCount < minRatio)
+                break;
+            refinedStart = selectedID;
+            refinedEnd = refinedStart + selectedCount;
+        }
+
+        /* evaluate gain based on new ref */
+        start = refinedStart;
+        pos = suffix[refinedStart];
+        end = start;
+        memset(lengthList, 0, sizeof(lengthList));
+
+        /* look forward */
+        {   size_t length;
+            do {
+                end++;
+                length = ZDICT_count(b + pos, b + suffix[end]);
+                if (length >= LLIMIT) length = LLIMIT-1;
+                lengthList[length]++;
+            } while (length >=MINMATCHLENGTH);
+        }
+
+        /* look backward */
+        {   size_t length = MINMATCHLENGTH;
+            while ((length >= MINMATCHLENGTH) & (start > 0)) {
+                length = ZDICT_count(b + pos, b + suffix[start - 1]);
+                if (length >= LLIMIT) length = LLIMIT - 1;
+                lengthList[length]++;
+                if (length >= MINMATCHLENGTH) start--;
+            }
+        }
+
+        /* largest useful length */
+        memset(cumulLength, 0, sizeof(cumulLength));
+        cumulLength[maxLength-1] = lengthList[maxLength-1];
+        for (i=(int)(maxLength-2); i>=0; i--)
+            cumulLength[i] = cumulLength[i+1] + lengthList[i];
+
+        for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
+        maxLength = i;
+
+        /* reduce maxLength if the selected segment ends in repetitive data */
+        {   U32 l = (U32)maxLength;
+            BYTE const c = b[pos + maxLength-1];
+            while (b[pos+l-2]==c) l--;
+            maxLength = l;
+        }
+        if (maxLength < MINMATCHLENGTH) return solution;   /* skip : no long-enough solution */
+
+        /* calculate savings */
+        savings[5] = 0;
+        for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
+            savings[i] = savings[i-1] + (lengthList[i] * (i-3));
+
+        DISPLAYLEVEL(4, "Selected ref at position %u, of length %u : saves %u (ratio: %.2f)  \n",
+                     (U32)pos, (U32)maxLength, savings[maxLength], (double)savings[maxLength] / maxLength);
+
+        solution.pos = (U32)pos;
+        solution.length = (U32)maxLength;
+        solution.savings = savings[maxLength];
+
+        /* mark positions done */
+        {   U32 id;
+            for (id=start; id<end; id++) {
+                U32 p, pEnd, length;
+                U32 const testedPos = suffix[id];
+                if (testedPos == pos)
+                    length = solution.length;
+                else {
+                    length = (U32)ZDICT_count(b+pos, b+testedPos);
+                    if (length > solution.length) length = solution.length;
+                }
+                pEnd = (U32)(testedPos + length);
+                for (p=testedPos; p<pEnd; p++)
+                    doneMarks[p] = 1;
+    }   }   }
+
+    return solution;
+}
+
+
+static int isIncluded(const void* in, const void* container, size_t length)
+{
+    const char* const ip = (const char*) in;
+    const char* const into = (const char*) container;
+    size_t u;
+
+    for (u=0; u<length; u++) {  /* works because end of buffer is a noisy guard band */
+        if (ip[u] != into[u]) break;
+    }
+
+    return u==length;
+}
+
+/*! ZDICT_tryMerge() :
+    check if dictItem can be merged with an existing entry; do it if possible.
+    @return : id of destination elt, 0 if not merged
+*/
+static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
+{
+    const U32 tableSize = table->pos;
+    const U32 eltEnd = elt.pos + elt.length;
+    const char* const buf = (const char*) buffer;
+
+    /* tail overlap */
+    U32 u; for (u=1; u<tableSize; u++) {
+        if (u==eltNbToSkip) continue;
+        if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */
+            /* append */
+            U32 const addedLength = table[u].pos - elt.pos;
+            table[u].length += addedLength;
+            table[u].pos = elt.pos;
+            table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
+            table[u].savings += elt.length / 8;    /* rough approx bonus */
+            elt = table[u];
+            /* sort : improve rank */
+            while ((u>1) && (table[u-1].savings < elt.savings))
+            table[u] = table[u-1], u--;
+            table[u] = elt;
+            return u;
+    }   }
+
+    /* front overlap */
+    for (u=1; u<tableSize; u++) {
+        if (u==eltNbToSkip) continue;
+
+        if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
+            /* append */
+            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
+            table[u].savings += elt.length / 8;    /* rough approx bonus */
+            if (addedLength > 0) {   /* otherwise, elt fully included into existing */
+                table[u].length += addedLength;
+                table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
+            }
+            /* sort : improve rank */
+            elt = table[u];
+            while ((u>1) && (table[u-1].savings < elt.savings))
+                table[u] = table[u-1], u--;
+            table[u] = elt;
+            return u;
+        }
+
+        if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
+            if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
+                size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
+                table[u].pos = elt.pos;
+                table[u].savings += (U32)(elt.savings * addedLength / elt.length);
+                table[u].length = MIN(elt.length, table[u].length + 1);
+                return u;
+            }
+        }
+    }
+
+    return 0;
+}
+
+
+static void ZDICT_removeDictItem(dictItem* table, U32 id)
+{
+    /* convention : table[0].pos stores the number of elements */
+    U32 const max = table[0].pos;
+    U32 u;
+    if (!id) return;   /* protection, should never happen */
+    for (u=id; u<max-1; u++)
+        table[u] = table[u+1];
+    table->pos--;
+}
+
+
+static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
+{
+    /* merge if possible */
+    U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
+    if (mergeId) {
+        U32 newMerge = 1;
+        while (newMerge) {
+            newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
+            if (newMerge) ZDICT_removeDictItem(table, mergeId);
+            mergeId = newMerge;
+        }
+        return;
+    }
+
+    /* insert */
+    {   U32 current;
+        U32 nextElt = table->pos;
+        if (nextElt >= maxSize) nextElt = maxSize-1;
+        current = nextElt-1;
+        while (table[current].savings < elt.savings) {
+            table[current+1] = table[current];
+            current--;
+        }
+        table[current+1] = elt;
+        table->pos = nextElt+1;
+    }
+}
+
+
+static U32 ZDICT_dictSize(const dictItem* dictList)
+{
+    U32 u, dictSize = 0;
+    for (u=1; u<dictList[0].pos; u++)
+        dictSize += dictList[u].length;
+    return dictSize;
+}
+
+
+static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
+                            const void* const buffer, size_t bufferSize,   /* buffer must end with noisy guard band */
+                            const size_t* fileSizes, unsigned nbFiles,
+                            U32 minRatio, U32 notificationLevel)
+{
+    int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
+    int* const suffix = suffix0+1;
+    U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
+    BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks));   /* +16 for overflow security */
+    U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
+    size_t result = 0;
+    clock_t displayClock = 0;
+    clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;
+
+#   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
+            if (ZDICT_clockSpan(displayClock) > refreshRate)  \
+            { displayClock = clock(); DISPLAY(__VA_ARGS__); \
+            if (notificationLevel>=4) fflush(stderr); } }
+
+    /* init */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
+        result = ERROR(memory_allocation);
+        goto _cleanup;
+    }
+    if (minRatio < MINRATIO) minRatio = MINRATIO;
+    memset(doneMarks, 0, bufferSize+16);
+
+    /* limit sample set size (divsufsort limitation) */
+    if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (U32)(ZDICT_MAX_SAMPLES_SIZE>>20));
+    while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
+
+    /* sort */
+    DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (U32)(bufferSize>>20));
+    {   int const divSufSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
+        if (divSufSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
+    }
+    suffix[bufferSize] = (int)bufferSize;   /* leads into noise */
+    suffix0[0] = (int)bufferSize;           /* leads into noise */
+    /* build reverse suffix sort */
+    {   size_t pos;
+        for (pos=0; pos < bufferSize; pos++)
+            reverseSuffix[suffix[pos]] = (U32)pos;
+        /* note filePos tracks borders between samples.
+           It's not used at this stage, but planned to become useful in a later update */
+        filePos[0] = 0;
+        for (pos=1; pos<nbFiles; pos++)
+            filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
+    }
+
+    DISPLAYLEVEL(2, "finding patterns ... \n");
+    DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);
+
+    {   U32 cursor; for (cursor=0; cursor < bufferSize; ) {
+            dictItem solution;
+            if (doneMarks[cursor]) { cursor++; continue; }
+            solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
+            if (solution.length==0) { cursor++; continue; }
+            ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
+            cursor += solution.length;
+            DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
+    }   }
+
+_cleanup:
+    free(suffix0);
+    free(reverseSuffix);
+    free(doneMarks);
+    free(filePos);
+    return result;
+}
+
+
+static void ZDICT_fillNoise(void* buffer, size_t length)
+{
+    unsigned const prime1 = 2654435761U;
+    unsigned const prime2 = 2246822519U;
+    unsigned acc = prime1;
+    size_t p=0;
+    for (p=0; p<length; p++) {
+        acc *= prime2;
+        ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
+    }
+}
+
+
+typedef struct
+{
+    ZSTD_CCtx* ref;    /* contains reference to dictionary */
+    ZSTD_CCtx* zc;     /* working context */
+    void* workPlace;   /* must be allocated with at least ZSTD_BLOCKSIZE_MAX bytes */
+} EStats_ress_t;
+
+#define MAXREPOFFSET 1024
+
+static void ZDICT_countEStats(EStats_ress_t esr, ZSTD_parameters params,
+                              U32* countLit, U32* offsetcodeCount, U32* matchlengthCount, U32* litlengthCount, U32* repOffsets,
+                              const void* src, size_t srcSize,
+                              U32 notificationLevel)
+{
+    size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params.cParams.windowLog);
+    size_t cSize;
+
+    if (srcSize > blockSizeMax) srcSize = blockSizeMax;   /* protection against large samples */
+    {   size_t const errorCode = ZSTD_copyCCtx(esr.zc, esr.ref, 0);
+        if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_copyCCtx failed \n"); return; }
+    }
+    cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
+    if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (U32)srcSize); return; }
+
+    if (cSize) {  /* if cSize == 0, the block is not compressible */
+        const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
+
+        /* literals stats */
+        {   const BYTE* bytePtr;
+            for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
+                countLit[*bytePtr]++;
+        }
+
+        /* seqStats */
+        {   U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+            ZSTD_seqToCodes(seqStorePtr);
+
+            {   const BYTE* codePtr = seqStorePtr->ofCode;
+                U32 u;
+                for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
+            }
+
+            {   const BYTE* codePtr = seqStorePtr->mlCode;
+                U32 u;
+                for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
+            }
+
+            {   const BYTE* codePtr = seqStorePtr->llCode;
+                U32 u;
+                for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
+            }
+
+            if (nbSeq >= 2) { /* rep offsets */
+                const seqDef* const seq = seqStorePtr->sequencesStart;
+                U32 offset1 = seq[0].offset - 3;
+                U32 offset2 = seq[1].offset - 3;
+                if (offset1 >= MAXREPOFFSET) offset1 = 0;
+                if (offset2 >= MAXREPOFFSET) offset2 = 0;
+                repOffsets[offset1] += 3;
+                repOffsets[offset2] += 1;
+    }   }   }
+}
+
+static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
+{
+    size_t total=0;
+    unsigned u;
+    for (u=0; u<nbFiles; u++) total += fileSizes[u];
+    return total;
+}
+
+typedef struct { U32 offset; U32 count; } offsetCount_t;
+
+static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
+{
+    U32 u;
+    table[ZSTD_REP_NUM].offset = val;
+    table[ZSTD_REP_NUM].count = count;
+    for (u=ZSTD_REP_NUM; u>0; u--) {
+        offsetCount_t tmp;
+        if (table[u-1].count >= table[u].count) break;
+        tmp = table[u-1];
+        table[u-1] = table[u];
+        table[u] = tmp;
+    }
+}
+
+/* ZDICT_flatLit() :
+ * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
+ * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
+ */
+static void ZDICT_flatLit(U32* countLit)
+{
+    int u;
+    for (u=1; u<256; u++) countLit[u] = 2;
+    countLit[0]   = 4;
+    countLit[253] = 1;
+    countLit[254] = 1;
+}
+
+#define OFFCODE_MAX 30  /* only applicable to first block */
+static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
+                                   unsigned compressionLevel,
+                             const void*  srcBuffer, const size_t* fileSizes, unsigned nbFiles,
+                             const void* dictBuffer, size_t  dictBufferSize,
+                                   unsigned notificationLevel)
+{
+    U32 countLit[256];
+    HUF_CREATE_STATIC_CTABLE(hufTable, 255);
+    U32 offcodeCount[OFFCODE_MAX+1];
+    short offcodeNCount[OFFCODE_MAX+1];
+    U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
+    U32 matchLengthCount[MaxML+1];
+    short matchLengthNCount[MaxML+1];
+    U32 litLengthCount[MaxLL+1];
+    short litLengthNCount[MaxLL+1];
+    U32 repOffset[MAXREPOFFSET];
+    offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
+    EStats_ress_t esr;
+    ZSTD_parameters params;
+    U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
+    size_t pos = 0, errorCode;
+    size_t eSize = 0;
+    size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
+    size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);
+    BYTE* dstPtr = (BYTE*)dstBuffer;
+
+    /* init */
+    DEBUGLOG(4, "ZDICT_analyzeEntropy");
+    esr.ref = ZSTD_createCCtx();
+    esr.zc = ZSTD_createCCtx();
+    esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
+    if (!esr.ref || !esr.zc || !esr.workPlace) {
+        eSize = ERROR(memory_allocation);
+        DISPLAYLEVEL(1, "Not enough memory \n");
+        goto _cleanup;
+    }
+    if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; }   /* too large dictionary */
+    for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
+    for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
+    for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
+    for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
+    memset(repOffset, 0, sizeof(repOffset));
+    repOffset[1] = repOffset[4] = repOffset[8] = 1;
+    memset(bestRepOffset, 0, sizeof(bestRepOffset));
+    if (compressionLevel<=0) compressionLevel = g_compressionLevel_default;
+    params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
+    {   size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
+        if (ZSTD_isError(beginResult)) {
+            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult));
+            eSize = ERROR(GENERIC);
+            goto _cleanup;
+    }   }
+
+    /* collect stats on all samples */
+    for (u=0; u<nbFiles; u++) {
+        ZDICT_countEStats(esr, params,
+                          countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
+                         (const char*)srcBuffer + pos, fileSizes[u],
+                          notificationLevel);
+        pos += fileSizes[u];
+    }
+
+    /* analyze, build stats, starting with literals */
+    {   size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+        if (HUF_isError(maxNbBits)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, " HUF_buildCTable error \n");
+            goto _cleanup;
+        }
+        if (maxNbBits==8) {  /* not compressible : will fail on HUF_writeCTable() */
+            DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
+            ZDICT_flatLit(countLit);  /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
+            maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
+            assert(maxNbBits==9);
+        }
+        huffLog = (U32)maxNbBits;
+    }
+
+    /* looking for most common first offsets */
+    {   U32 offset;
+        for (offset=1; offset<MAXREPOFFSET; offset++)
+            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
+    }
+    /* note : the result of this phase should be used to better appreciate the impact on statistics */
+
+    total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
+    errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
+    if (FSE_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
+        goto _cleanup;
+    }
+    Offlog = (U32)errorCode;
+
+    total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
+    errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
+    if (FSE_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
+        goto _cleanup;
+    }
+    mlLog = (U32)errorCode;
+
+    total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
+    errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
+    if (FSE_isError(errorCode)) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
+        goto _cleanup;
+    }
+    llLog = (U32)errorCode;
+
+    /* write result to buffer */
+    {   size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
+        if (HUF_isError(hhSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "HUF_writeCTable error \n");
+            goto _cleanup;
+        }
+        dstPtr += hhSize;
+        maxDstSize -= hhSize;
+        eSize += hhSize;
+    }
+
+    {   size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
+        if (FSE_isError(ohSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
+            goto _cleanup;
+        }
+        dstPtr += ohSize;
+        maxDstSize -= ohSize;
+        eSize += ohSize;
+    }
+
+    {   size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
+        if (FSE_isError(mhSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
+            goto _cleanup;
+        }
+        dstPtr += mhSize;
+        maxDstSize -= mhSize;
+        eSize += mhSize;
+    }
+
+    {   size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
+        if (FSE_isError(lhSize)) {
+            eSize = ERROR(GENERIC);
+            DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
+            goto _cleanup;
+        }
+        dstPtr += lhSize;
+        maxDstSize -= lhSize;
+        eSize += lhSize;
+    }
+
+    if (maxDstSize<12) {
+        eSize = ERROR(GENERIC);
+        DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
+        goto _cleanup;
+    }
+#if 0
+    MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
+    MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
+    MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
+#else
+    /* at this stage, we don't use the result of "most common first offset",
+       as its impact on statistics has not been properly evaluated */
+    MEM_writeLE32(dstPtr+0, repStartValue[0]);
+    MEM_writeLE32(dstPtr+4, repStartValue[1]);
+    MEM_writeLE32(dstPtr+8, repStartValue[2]);
+#endif
+    eSize += 12;
+
+_cleanup:
+    ZSTD_freeCCtx(esr.ref);
+    ZSTD_freeCCtx(esr.zc);
+    free(esr.workPlace);
+
+    return eSize;
+}
+
+
+
+size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                          const void* customDictContent, size_t dictContentSize,
+                          const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                          ZDICT_params_t params)
+{
+    size_t hSize;
+#define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
+    BYTE header[HBUFFSIZE];
+    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    U32 const notificationLevel = params.notificationLevel;
+
+    /* check conditions */
+    DEBUGLOG(4, "ZDICT_finalizeDictionary");
+    if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
+    if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
+    if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
+
+    /* dictionary header */
+    MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
+    {   U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
+        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+        U32 const dictID = params.dictID ? params.dictID : compliantID;
+        MEM_writeLE32(header+4, dictID);
+    }
+    hSize = 8;
+
+    /* entropy tables */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    DISPLAYLEVEL(2, "statistics ... \n");
+    {   size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
+                                  compressionLevel,
+                                  samplesBuffer, samplesSizes, nbSamples,
+                                  customDictContent, dictContentSize,
+                                  notificationLevel);
+        if (ZDICT_isError(eSize)) return eSize;
+        hSize += eSize;
+    }
+
+    /* copy elements into final buffer ; note : src and dst buffers can overlap */
+    if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
+    {   size_t const dictSize = hSize + dictContentSize;
+        char* dictEnd = (char*)dictBuffer + dictSize;
+        memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
+        memcpy(dictBuffer, header, hSize);
+        return dictSize;
+    }
+}
+
+
+size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+                                                 const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                                 ZDICT_params_t params)
+{
+    int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
+    U32 const notificationLevel = params.notificationLevel;
+    size_t hSize = 8;
+
+    /* calculate entropy tables */
+    DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
+    DISPLAYLEVEL(2, "statistics ... \n");
+    {   size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
+                                  compressionLevel,
+                                  samplesBuffer, samplesSizes, nbSamples,
+                                  (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,
+                                  notificationLevel);
+        if (ZDICT_isError(eSize)) return eSize;
+        hSize += eSize;
+    }
+
+    /* add dictionary header (after entropy tables) */
+    MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
+    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
+        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
+        U32 const dictID = params.dictID ? params.dictID : compliantID;
+        MEM_writeLE32((char*)dictBuffer+4, dictID);
+    }
+
+    if (hSize + dictContentSize < dictBufferCapacity)
+        memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
+    return MIN(dictBufferCapacity, hSize+dictContentSize);
+}
+
+
+/*! ZDICT_trainFromBuffer_unsafe_legacy() :
+*   Warning : `samplesBuffer` must be followed by a noisy guard band.
+*   @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
+*/
+size_t ZDICT_trainFromBuffer_unsafe_legacy(
+                            void* dictBuffer, size_t maxDictSize,
+                            const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                            ZDICT_legacy_params_t params)
+{
+    U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
+    dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
+    unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;
+    unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;
+    size_t const targetDictSize = maxDictSize;
+    size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
+    size_t dictSize = 0;
+    U32 const notificationLevel = params.zParams.notificationLevel;
+
+    /* checks */
+    if (!dictList) return ERROR(memory_allocation);
+    if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); }   /* requested dictionary size is too small */
+    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* not enough source to create dictionary */
+
+    /* init */
+    ZDICT_initDictItem(dictList);
+
+    /* build dictionary */
+    ZDICT_trainBuffer_legacy(dictList, dictListSize,
+                       samplesBuffer, samplesBuffSize,
+                       samplesSizes, nbSamples,
+                       minRep, notificationLevel);
+
+    /* display best matches */
+    if (params.zParams.notificationLevel >= 3) {
+        U32 const nb = MIN(25, dictList[0].pos);
+        U32 const dictContentSize = ZDICT_dictSize(dictList);
+        U32 u;
+        DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", dictList[0].pos-1, dictContentSize);
+        DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
+        for (u=1; u<nb; u++) {
+            U32 const pos = dictList[u].pos;
+            U32 const length = dictList[u].length;
+            U32 const printedLength = MIN(40, length);
+            if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
+                free(dictList);   /* avoid leaking dictList on this error path */
+                return ERROR(GENERIC);   /* should never happen */
+            }
+            DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
+                         u, length, pos, dictList[u].savings);
+            ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
+            DISPLAYLEVEL(3, "| \n");
+    }   }
+
+
+    /* create dictionary */
+    {   U32 dictContentSize = ZDICT_dictSize(dictList);
+        if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */
+        if (dictContentSize < targetDictSize/4) {
+            DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize);
+            if (samplesBuffSize < 10 * targetDictSize)
+                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20));
+            if (minRep > MINRATIO) {
+                DISPLAYLEVEL(2, "!  consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
+                DISPLAYLEVEL(2, "!  note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
+            }
+        }
+
+        if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
+            U32 proposedSelectivity = selectivity-1;
+            while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
+            DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize);
+            DISPLAYLEVEL(2, "!  consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
+            DISPLAYLEVEL(2, "!  always test dictionary efficiency on real samples \n");
+        }
+
+        /* limit dictionary size */
+        {   U32 const max = dictList->pos;   /* convention : nb of useful elts within dictList */
+            U32 currentSize = 0;
+            U32 n; for (n=1; n<max; n++) {
+                currentSize += dictList[n].length;
+                if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
+            }
+            dictList->pos = n;
+            dictContentSize = currentSize;
+        }
+
+        /* build dict content */
+        {   U32 u;
+            BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
+            for (u=1; u<dictList->pos; u++) {
+                U32 l = dictList[u].length;
+                ptr -= l;
+                if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); }   /* should not happen */
+                memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
+        }   }
+
+        dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
+                                                             samplesBuffer, samplesSizes, nbSamples,
+                                                             params.zParams);
+    }
+
+    /* clean up */
+    free(dictList);
+    return dictSize;
+}
+
+
+/* ZDICT_trainFromBuffer_legacy() :
+ * issue : samplesBuffer needs to be followed by a noisy guard band.
+ * workaround : duplicate the buffer, and add the noise */
+size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
+                              const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                              ZDICT_legacy_params_t params)
+{
+    size_t result;
+    void* newBuff;
+    size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
+    if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0;   /* not enough content => no dictionary */
+
+    newBuff = malloc(sBuffSize + NOISELENGTH);
+    if (!newBuff) return ERROR(memory_allocation);
+
+    memcpy(newBuff, samplesBuffer, sBuffSize);
+    ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH);   /* guard band, for end of buffer condition */
+
+    result =
+        ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
+                                            samplesSizes, nbSamples, params);
+    free(newBuff);
+    return result;
+}
+
+
+size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                             const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    ZDICT_cover_params_t params;
+    DEBUGLOG(3, "ZDICT_trainFromBuffer");
+    memset(&params, 0, sizeof(params));
+    params.d = 8;
+    params.steps = 4;
+    /* Default to level 6 since no compression level information is available */
+    params.zParams.compressionLevel = 6;
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
+    params.zParams.notificationLevel = ZSTD_DEBUG;
+#endif
+    return ZDICT_optimizeTrainFromBuffer_cover(dictBuffer, dictBufferCapacity,
+                                               samplesBuffer, samplesSizes, nbSamples,
+                                               &params);
+}
+
+size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
+{
+    ZDICT_params_t params;
+    memset(&params, 0, sizeof(params));
+    return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
+                                                     samplesBuffer, samplesSizes, nbSamples,
+                                                     params);
+}
diff --git a/vendor/github.com/DataDog/zstd/zdict.h b/vendor/github.com/DataDog/zstd/zdict.h
new file mode 100644
index 0000000..ad459c2
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zdict.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef DICTBUILDER_H_001
+#define DICTBUILDER_H_001
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*======  Dependencies  ======*/
+#include <stddef.h>  /* size_t */
+
+
+/* =====   ZDICTLIB_API : control library symbols visibility   ===== */
+#ifndef ZDICTLIB_VISIBILITY
+#  if defined(__GNUC__) && (__GNUC__ >= 4)
+#    define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#  else
+#    define ZDICTLIB_VISIBILITY
+#  endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required, but it allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+#  define ZDICTLIB_API ZDICTLIB_VISIBILITY
+#endif
+
+
+/*! ZDICT_trainFromBuffer():
+ *  Train a dictionary from an array of samples.
+ *  Redirects to ZDICT_optimizeTrainFromBuffer_cover(), single-threaded, with d=8 and steps=4.
+ *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ *  The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *          or an error code, which can be tested with ZDICT_isError().
+ *  Note: ZDICT_trainFromBuffer() requires about 9 bytes of memory for each input byte.
+ *  Tips: In general, a reasonable dictionary has a size of ~100 KB.
+ *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
+ *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ *        It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
+                                    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
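+
+/* Illustrative sketch (not part of the upstream documentation) : training a
+ * ~100 KB dictionary. `samplesBuffer`, `samplesSizes` and `nbSamples` are
+ * assumed to describe the concatenated samples, as explained above :
+ *
+ *     size_t const dictCapacity = 100 * 1024;
+ *     void* const dictBuffer = malloc(dictCapacity);
+ *     size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, dictCapacity,
+ *                                 samplesBuffer, samplesSizes, nbSamples);
+ *     if (ZDICT_isError(dictSize))
+ *         printf("training failed : %s \n", ZDICT_getErrorName(dictSize));
+ */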
+
+
+/*======   Helper functions   ======*/
+ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize);  /**< extracts dictID; @return zero if error (not a valid dictionary) */
+ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
+ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
+
+
+
+#ifdef ZDICT_STATIC_LINKING_ONLY
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+typedef struct {
+    int      compressionLevel;   /* optimize for a specific zstd compression level; 0 means default */
+    unsigned notificationLevel;  /* Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
+    unsigned dictID;             /* force dictID value; 0 means auto mode (32-bits random value) */
+} ZDICT_params_t;
+
+/*! ZDICT_cover_params_t:
+ *  k and d are the only required parameters.
+ *  For others, value 0 means default.
+ */
+typedef struct {
+    unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
+    unsigned d;                  /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */
+    unsigned steps;              /* Number of steps : Only used for optimization : 0 means default (32) : Higher means more parameters checked */
+    unsigned nbThreads;          /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */
+    ZDICT_params_t zParams;
+} ZDICT_cover_params_t;
+
+
+/*! ZDICT_trainFromBuffer_cover():
+ *  Train a dictionary from an array of samples using the COVER algorithm.
+ *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ *  The resulting dictionary will be saved into `dictBuffer`.
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *          or an error code, which can be tested with ZDICT_isError().
+ *  Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte.
+ *  Tips: In general, a reasonable dictionary has a size of ~100 KB.
+ *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
+ *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ *        It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
+          void *dictBuffer, size_t dictBufferCapacity,
+    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+          ZDICT_cover_params_t parameters);
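+
+/* Illustrative sketch (not part of the upstream documentation) : explicit
+ * COVER training with hand-picked k and d, both within the ranges documented
+ * in ZDICT_cover_params_t above :
+ *
+ *     ZDICT_cover_params_t cParams;
+ *     memset(&cParams, 0, sizeof(cParams));
+ *     cParams.k = 200;
+ *     cParams.d = 8;
+ *     size_t const dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, dictCapacity,
+ *                                 samplesBuffer, samplesSizes, nbSamples, cParams);
+ */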
+
+/*! ZDICT_optimizeTrainFromBuffer_cover():
+ * The same requirements as above hold for all the parameters except `parameters`.
+ * This function tries many parameter combinations and picks the best parameters.
+ * `*parameters` is filled with the best parameters found,
+ * and the dictionary constructed with those parameters is stored in `dictBuffer`.
+ *
+ * All of the parameters d, k and steps are optional.
+ * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8, 10, 12, 14, 16}.
+ * If steps is zero, its default value (32) is used.
+ * If k is non-zero then we don't check multiple values of k, otherwise we check k values in [16, 2048].
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *           or an error code, which can be tested with ZDICT_isError().
+ *           On success `*parameters` contains the parameters selected.
+ * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte, plus an additional 5 bytes of memory per input byte for each thread.
+ */
+ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
+          void* dictBuffer, size_t dictBufferCapacity,
+    const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+          ZDICT_cover_params_t* parameters);
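+
+/* Illustrative sketch (not part of the upstream documentation) : zeroed k and
+ * d let the optimizer search the ranges described above; on success the
+ * selected values are written back into the struct :
+ *
+ *     ZDICT_cover_params_t best;
+ *     memset(&best, 0, sizeof(best));
+ *     size_t const dictSize = ZDICT_optimizeTrainFromBuffer_cover(
+ *                                 dictBuffer, dictCapacity,
+ *                                 samplesBuffer, samplesSizes, nbSamples, &best);
+ *     if (!ZDICT_isError(dictSize))
+ *         printf("best k=%u d=%u \n", best.k, best.d);
+ */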
+
+/*! ZDICT_finalizeDictionary():
+ * Given custom content as a basis for the dictionary, and a set of samples,
+ * finalize dictionary by adding headers and statistics.
+ *
+ * Samples must be stored concatenated in a flat buffer `samplesBuffer`,
+ * supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
+ *
+ * dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
+ * dictBufferCapacity must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
+ *
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
+ *           or an error code, which can be tested by ZDICT_isError().
+ * Note: ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
+ * Note 2: dictBuffer and dictContent can overlap
+ */
+#define ZDICT_CONTENTSIZE_MIN 128
+#define ZDICT_DICTSIZE_MIN    256
+ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
+                                const void* dictContent, size_t dictContentSize,
+                                const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
+                                ZDICT_params_t parameters);
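+
+/* Illustrative sketch (not part of the upstream documentation) : wrapping
+ * hand-built raw content (`customContent`, assumed >= ZDICT_CONTENTSIZE_MIN
+ * bytes) into a conformant dictionary; zeroed params select the defaults :
+ *
+ *     ZDICT_params_t zParams;
+ *     memset(&zParams, 0, sizeof(zParams));
+ *     size_t const dictSize = ZDICT_finalizeDictionary(
+ *                                 dictBuffer, dictCapacity,
+ *                                 customContent, customContentSize,
+ *                                 samplesBuffer, samplesSizes, nbSamples, zParams);
+ */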
+
+typedef struct {
+    unsigned selectivityLevel;   /* 0 means default; larger => select more => larger dictionary */
+    ZDICT_params_t zParams;
+} ZDICT_legacy_params_t;
+
+/*! ZDICT_trainFromBuffer_legacy():
+ *  Train a dictionary from an array of samples.
+ *  Samples must be stored concatenated in a single flat buffer `samplesBuffer`,
+ *  supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.
+ *  The resulting dictionary will be saved into `dictBuffer`.
+ * `parameters` is optional and can be provided with values set to 0 to mean "default".
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)
+ *          or an error code, which can be tested with ZDICT_isError().
+ *  Tips: In general, a reasonable dictionary has a size of ~100 KB.
+ *        It's possible to select a smaller or larger size, just by specifying `dictBufferCapacity`.
+ *        In general, it's recommended to provide a few thousand samples, though this can vary a lot.
+ *        It's recommended that the total size of all samples be about 100x the target size of the dictionary.
+ *  Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
+ */
+ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
+    void *dictBuffer, size_t dictBufferCapacity,
+    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
+    ZDICT_legacy_params_t parameters);
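+
+/* Illustrative sketch (not part of the upstream documentation) : legacy
+ * training with default selectivity (0 means default, as noted above) :
+ *
+ *     ZDICT_legacy_params_t lParams;
+ *     memset(&lParams, 0, sizeof(lParams));
+ *     size_t const dictSize = ZDICT_trainFromBuffer_legacy(dictBuffer, dictCapacity,
+ *                                 samplesBuffer, samplesSizes, nbSamples, lParams);
+ */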
+
+/* Deprecation warnings */
+/* It is generally possible to disable deprecation warnings from the compiler,
+   for example with -Wno-deprecated-declarations for gcc,
+   or _CRT_SECURE_NO_WARNINGS in Visual Studio.
+   Otherwise, it's also possible to define ZDICT_DISABLE_DEPRECATE_WARNINGS manually. */
+#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
+#  define ZDICT_DEPRECATED(message) ZDICTLIB_API   /* disable deprecation warnings */
+#else
+#  define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#  if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+#    define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
+#  elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
+#  elif (ZDICT_GCC_VERSION >= 301)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
+#  elif defined(_MSC_VER)
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
+#  else
+#    pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
+#    define ZDICT_DEPRECATED(message) ZDICTLIB_API
+#  endif
+#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
+
+ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
+size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
+                                  const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
+
+
+#endif   /* ZDICT_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* DICTBUILDER_H_001 */
diff --git a/vendor/github.com/DataDog/zstd/zstd.go b/vendor/github.com/DataDog/zstd/zstd.go
new file mode 100644
index 0000000..e9953d6
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd.go
@@ -0,0 +1,144 @@
+package zstd
+
+/*
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#include "stdint.h"  // for uintptr_t
+
+// The following *_wrapper functions are used to remove superfluous
+// memory allocations when calling the wrapped functions from Go code.
+// See https://github.com/golang/go/issues/24450 for details.
+
+static size_t ZSTD_compress_wrapper(uintptr_t dst, size_t maxDstSize, const uintptr_t src, size_t srcSize, int compressionLevel) {
+	return ZSTD_compress((void*)dst, maxDstSize, (const void*)src, srcSize, compressionLevel);
+}
+
+static size_t ZSTD_decompress_wrapper(uintptr_t dst, size_t maxDstSize, uintptr_t src, size_t srcSize) {
+	return ZSTD_decompress((void*)dst, maxDstSize, (const void *)src, srcSize);
+}
+
+*/
+import "C"
+import (
+	"bytes"
+	"errors"
+	"io/ioutil"
+	"unsafe"
+)
+
+// Defines best and standard values for the zstd CLI.
+const (
+	BestSpeed          = 1
+	BestCompression    = 20
+	DefaultCompression = 5
+)
+
+var (
+	// ErrEmptySlice is returned when there is nothing to compress
+	ErrEmptySlice = errors.New("Bytes slice is empty")
+)
+
+// CompressBound returns the worst case size needed for a destination buffer,
+// which can be used to preallocate a destination buffer or select a previously
+// allocated buffer from a pool.
+// See zstd.h: this mirrors the implementation of ZSTD_COMPRESSBOUND.
+func CompressBound(srcSize int) int {
+	lowLimit := 128 << 10 // 128 kB
+	var margin int
+	if srcSize < lowLimit {
+		margin = (lowLimit - srcSize) >> 11
+	}
+	return srcSize + (srcSize >> 8) + margin
+}
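+
+// Worked example (illustrative, not from upstream): for srcSize = 1000,
+// CompressBound returns 1000 + (1000>>8) + ((128<<10-1000)>>11)
+// = 1000 + 3 + 63 = 1066.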
+
+// cCompressBound is a cgo call used to check the Go implementation above against the C code.
+func cCompressBound(srcSize int) int {
+	return int(C.ZSTD_compressBound(C.size_t(srcSize)))
+}
+
+// Compress src into dst.  If you have a buffer to use, you can pass it to
+// prevent allocation.  If it is too small, or if nil is passed, a new buffer
+// will be allocated and returned.
+func Compress(dst, src []byte) ([]byte, error) {
+	return CompressLevel(dst, src, DefaultCompression)
+}
+
+// CompressLevel is the same as Compress, but lets you pass a compression level.
+func CompressLevel(dst, src []byte, level int) ([]byte, error) {
+	bound := CompressBound(len(src))
+	if cap(dst) >= bound {
+		dst = dst[0:bound] // Reuse dst buffer
+	} else {
+		dst = make([]byte, bound)
+	}
+
+	srcPtr := C.uintptr_t(uintptr(0)) // Do not point anywhere, if src is empty
+	if len(src) > 0 {
+		srcPtr = C.uintptr_t(uintptr(unsafe.Pointer(&src[0])))
+	}
+
+	cWritten := C.ZSTD_compress_wrapper(
+		C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
+		C.size_t(len(dst)),
+		srcPtr,
+		C.size_t(len(src)),
+		C.int(level))
+
+	written := int(cWritten)
+	// Check if the return value is an error code
+	if err := getError(written); err != nil {
+		return nil, err
+	}
+	return dst[:written], nil
+}
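+
+// Illustrative usage sketch (not from upstream; assumes `payload` holds the
+// input bytes):
+//
+//	compressed, err := zstd.Compress(nil, payload) // nil dst: allocate internally
+//	if err != nil {
+//		return err
+//	}
+//	fast, err := zstd.CompressLevel(nil, payload, zstd.BestSpeed)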
+
+// Decompress src into dst.  If you have a buffer to use, you can pass it to
+// prevent allocation.  If it is too small, or if nil is passed, a new buffer
+// will be allocated and returned.
+func Decompress(dst, src []byte) ([]byte, error) {
+	if len(src) == 0 {
+		return []byte{}, ErrEmptySlice
+	}
+	decompress := func(dst, src []byte) ([]byte, error) {
+
+		cWritten := C.ZSTD_decompress_wrapper(
+			C.uintptr_t(uintptr(unsafe.Pointer(&dst[0]))),
+			C.size_t(len(dst)),
+			C.uintptr_t(uintptr(unsafe.Pointer(&src[0]))),
+			C.size_t(len(src)))
+
+		written := int(cWritten)
+		// Check error
+		if err := getError(written); err != nil {
+			return nil, err
+		}
+		return dst[:written], nil
+	}
+
+	if dst == nil {
+		// Attempt to use zstd to determine the decompressed size (may result in an error or 0)
+		size := int(C.size_t(C.ZSTD_getDecompressedSize(unsafe.Pointer(&src[0]), C.size_t(len(src)))))
+
+		if err := getError(size); err != nil {
+			return nil, err
+		}
+
+		if size > 0 {
+			dst = make([]byte, size)
+		} else {
+			dst = make([]byte, len(src)*3) // starting guess
+		}
+	}
+	for i := 0; i < 3; i++ { // 3 tries to allocate a bigger buffer
+		result, err := decompress(dst, src)
+		if !IsDstSizeTooSmallError(err) {
+			return result, err
+		}
+		dst = make([]byte, len(dst)*2) // Double the buffer size
+	}
+
+	// We failed to get a dst buffer of the correct size; fall back to the streaming API
+	r := NewReader(bytes.NewReader(src))
+	defer r.Close()
+	return ioutil.ReadAll(r)
+}
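+
+// Illustrative round-trip sketch (not from upstream; assumes `data` holds the
+// original bytes):
+//
+//	compressed, err := zstd.Compress(nil, data)
+//	if err != nil {
+//		return err
+//	}
+//	restored, err := zstd.Decompress(nil, compressed)
+//	// on success, restored equals data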
diff --git a/vendor/github.com/DataDog/zstd/zstd.h b/vendor/github.com/DataDog/zstd/zstd.h
new file mode 100644
index 0000000..6405da6
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd.h
@@ -0,0 +1,1399 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef ZSTD_H_235446
+#define ZSTD_H_235446
+
+/* ======   Dependency   ======*/
+#include <stddef.h>   /* size_t */
+
+
+/* =====   ZSTDLIB_API : control library symbols visibility   ===== */
+#ifndef ZSTDLIB_VISIBILITY
+#  if defined(__GNUC__) && (__GNUC__ >= 4)
+#    define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#  else
+#    define ZSTDLIB_VISIBILITY
+#  endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required, but it allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+#  define ZSTDLIB_API ZSTDLIB_VISIBILITY
+#endif
+
+
+/*******************************************************************************************************
+  Introduction
+
+  zstd, short for Zstandard, is a fast lossless compression algorithm,
+  targeting real-time compression scenarios at zlib-level and better compression ratios.
+  The zstd compression library provides in-memory compression and decompression functions.
+  The library supports compression levels from 1 up to ZSTD_maxCLevel() which is currently 22.
+  Levels >= 20, labeled `--ultra`, should be used with caution, as they require more memory.
+  Compression can be done in:
+    - a single step (described as Simple API)
+    - a single step, reusing a context (described as Explicit context)
+    - unbounded multiple steps (described as Streaming compression)
+  The compression ratio achievable on small data can be highly improved using a dictionary in:
+    - a single step (described as Simple dictionary API)
+    - a single step, reusing a dictionary (described as Bulk-processing dictionary API)
+
+  Advanced experimental functions can be accessed using #define ZSTD_STATIC_LINKING_ONLY before including zstd.h.
+  Advanced experimental APIs shall never be used with a dynamic library.
+  They are not "stable", their definition may change in the future. Only static linking is allowed.
+*********************************************************************************************************/
+
+/*------   Version   ------*/
+#define ZSTD_VERSION_MAJOR    1
+#define ZSTD_VERSION_MINOR    3
+#define ZSTD_VERSION_RELEASE  4
+
+#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< useful to check dll version */
+
+#define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
+#define ZSTD_QUOTE(str) #str
+#define ZSTD_EXPAND_AND_QUOTE(str) ZSTD_QUOTE(str)
+#define ZSTD_VERSION_STRING ZSTD_EXPAND_AND_QUOTE(ZSTD_LIB_VERSION)
+ZSTDLIB_API const char* ZSTD_versionString(void);   /* added in v1.3.0 */
+
+
+/***************************************
+*  Simple API
+***************************************/
+/*! ZSTD_compress() :
+ *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
+ *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
+ *  @return : compressed size written into `dst` (<= `dstCapacity`),
+ *            or an error code if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                                  int compressionLevel);
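+
+/* Illustrative sketch (not part of the upstream documentation) : one-shot
+ * compression, sizing `dst` with ZSTD_compressBound() as hinted above :
+ *
+ *     size_t const bound = ZSTD_compressBound(srcSize);
+ *     void* const dst = malloc(bound);
+ *     size_t const cSize = ZSTD_compress(dst, bound, src, srcSize, 3);
+ *     if (ZSTD_isError(cSize))
+ *         printf("compression failed : %s \n", ZSTD_getErrorName(cSize));
+ */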
+
+/*! ZSTD_decompress() :
+ *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
+ *  `dstCapacity` is an upper bound of the original size to regenerate.
+ *  If the user cannot infer a maximum upper bound, it's better to use streaming mode to decompress data.
+ *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+ *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
+                              const void* src, size_t compressedSize);
+
+/*! ZSTD_getFrameContentSize() : added in v1.3.0
+ *  `src` should point to the start of a ZSTD encoded frame.
+ *  `srcSize` must be at least as large as the frame header.
+ *            hint : any size >= `ZSTD_frameHeaderSize_max` is large enough.
+ *  @return : - decompressed size of the frame in `src`, if known
+ *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
+ *   note 1 : a 0 return value means the frame is valid but "empty".
+ *   note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode.
+ *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ *            In which case, it's necessary to use streaming mode to decompress data.
+ *            Optionally, application can rely on some implicit limit,
+ *            as ZSTD_decompress() only needs an upper bound of decompressed size.
+ *            (For example, data could be necessarily cut into blocks <= 16 KB).
+ *   note 3 : decompressed size is always present when compression is done with ZSTD_compress()
+ *   note 4 : decompressed size can be very large (64-bits value),
+ *            potentially larger than what local system can handle as a single memory segment.
+ *            In which case, it's necessary to use streaming mode to decompress data.
+ *   note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ *            Always ensure return value fits within application's authorized limits.
+ *            Each application can set its own limits.
+ *   note 6 : This function replaces ZSTD_getDecompressedSize() */
+#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
+#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
+ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
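+
+/* Illustrative sketch (not part of the upstream documentation) : sizing the
+ * destination from the frame header before calling ZSTD_decompress() :
+ *
+ *     unsigned long long const rSize = ZSTD_getFrameContentSize(cSrc, cSrcSize);
+ *     if (rSize == ZSTD_CONTENTSIZE_ERROR || rSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+ *         ...   fall back to streaming decompression   ...
+ *     } else {
+ *         void* const dst = malloc((size_t)rSize);
+ *         size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, cSrc, cSrcSize);
+ *     }
+ */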
+
+/*! ZSTD_getDecompressedSize() :
+ *  NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize().
+ *  Both functions work the same way, but ZSTD_getDecompressedSize() blends
+ *  "empty", "unknown" and "error" results to the same return value (0),
+ *  while ZSTD_getFrameContentSize() gives them separate return values.
+ * `src` is the start of a zstd compressed frame.
+ * @return : content size to be decompressed, as a 64-bits value _if known and not empty_, 0 otherwise. */
+ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
+
+
+/*======  Helper functions  ======*/
+#define ZSTD_COMPRESSBOUND(srcSize)   ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0))  /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
+ZSTDLIB_API size_t      ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
+ZSTDLIB_API unsigned    ZSTD_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
+ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readable string from an error code */
+ZSTDLIB_API int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
+
+
+/***************************************
+*  Explicit context
+***************************************/
+/*= Compression context
+ *  When compressing many times,
+ *  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
+ *  This makes the workload friendlier to the system's memory.
+ *  Use one context per thread for parallel execution in multi-threaded environments. */
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
+ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
+ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
+
+/*! ZSTD_compressCCtx() :
+ *  Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
+ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx,
+                                     void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize,
+                                     int compressionLevel);
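+
+/* Illustrative sketch (not part of the upstream documentation) : one context
+ * re-used across many compressions, as recommended above :
+ *
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *     size_t i;
+ *     for (i = 0; i < nbBuffers; i++) {
+ *         size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity,
+ *                                                srcs[i], srcSizes[i], 3);
+ *         ...   check ZSTD_isError(cSize), then consume dst   ...
+ *     }
+ *     ZSTD_freeCCtx(cctx);
+ */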
+
+/*= Decompression context
+ *  When decompressing many times,
+ *  it is recommended to allocate a context only once,
+ *  and re-use it for each successive decompression operation.
+ *  This makes the workload friendlier to the system's memory.
+ *  Use one context per thread for parallel execution. */
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
+ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
+
+/*! ZSTD_decompressDCtx() :
+ *  Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()) */
+ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx,
+                                       void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize);
+
+
+/**************************
+*  Simple dictionary API
+***************************/
+/*! ZSTD_compress_usingDict() :
+ *  Compression using a predefined Dictionary (see dictBuilder/zdict.h).
+ *  Note : This function loads the dictionary, resulting in significant startup delay.
+ *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const void* dict,size_t dictSize,
+                                           int compressionLevel);
+
+/*! ZSTD_decompress_usingDict() :
+ *  Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
+ *  Dictionary must be identical to the one used during compression.
+ *  Note : This function loads the dictionary, resulting in significant startup delay.
+ *  Note : When `dict == NULL || dictSize < 8` no dictionary is used. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+                                             void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize,
+                                       const void* dict,size_t dictSize);
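+
+/* Illustrative sketch (not part of the upstream documentation) : both sides
+ * must use the exact same dictionary content :
+ *
+ *     size_t const cSize = ZSTD_compress_usingDict(cctx, dst, dstCapacity,
+ *                              src, srcSize, dict, dictSize, 3);
+ *     size_t const dSize = ZSTD_decompress_usingDict(dctx, out, outCapacity,
+ *                              dst, cSize, dict, dictSize);
+ */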
+
+
+/**********************************
+ *  Bulk processing dictionary API
+ *********************************/
+typedef struct ZSTD_CDict_s ZSTD_CDict;
+
+/*! ZSTD_createCDict() :
+ *  When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ *  ZSTD_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ *  ZSTD_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ *  `dictBuffer` can be released after ZSTD_CDict creation, since its content is copied within CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize,
+                                         int compressionLevel);
+
+/*! ZSTD_freeCDict() :
+ *  Function frees memory allocated by ZSTD_createCDict(). */
+ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
+
+/*! ZSTD_compress_usingCDict() :
+ *  Compression using a digested Dictionary.
+ *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
+ *  Note that compression level is decided during dictionary creation.
+ *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                            void* dst, size_t dstCapacity,
+                                      const void* src, size_t srcSize,
+                                      const ZSTD_CDict* cdict);
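+
+/* Illustrative sketch (not part of the upstream documentation) : digest the
+ * dictionary once, then compress many messages without the per-call loading
+ * delay :
+ *
+ *     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
+ *     size_t const cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity,
+ *                                                   src, srcSize, cdict);
+ *     ZSTD_freeCDict(cdict);
+ */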
+
+
+typedef struct ZSTD_DDict_s ZSTD_DDict;
+
+/*! ZSTD_createDDict() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  dictBuffer can be released after DDict creation, as its content is copied inside DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
+
+/*! ZSTD_freeDDict() :
+ *  Function frees memory allocated with ZSTD_createDDict() */
+ZSTDLIB_API size_t      ZSTD_freeDDict(ZSTD_DDict* ddict);
+
+/*! ZSTD_decompress_usingDDict() :
+ *  Decompression using a digested Dictionary.
+ *  Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times. */
+ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+                                              void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize,
+                                        const ZSTD_DDict* ddict);
+
+
+/****************************
+*  Streaming
+****************************/
+
+typedef struct ZSTD_inBuffer_s {
+  const void* src;    /**< start of input buffer */
+  size_t size;        /**< size of input buffer */
+  size_t pos;         /**< position where reading stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_inBuffer;
+
+typedef struct ZSTD_outBuffer_s {
+  void*  dst;         /**< start of output buffer */
+  size_t size;        /**< size of output buffer */
+  size_t pos;         /**< position where writing stopped. Will be updated. Necessarily 0 <= pos <= size */
+} ZSTD_outBuffer;
+
+
+
+/*-***********************************************************************
+*  Streaming compression - HowTo
+*
+*  A ZSTD_CStream object is required to track streaming operation.
+*  Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources.
+*  ZSTD_CStream objects can be reused multiple times on consecutive compression operations.
+*  It is recommended to re-use a ZSTD_CStream in situations where many streaming operations will be performed consecutively,
+*  since it plays nicer with the system's memory, by re-using already allocated memory.
+*  Use one separate ZSTD_CStream per thread for parallel execution.
+*
+*  Start a new compression by initializing ZSTD_CStream.
+*  Use ZSTD_initCStream() to start a new compression operation.
+*  Use ZSTD_initCStream_usingDict() or ZSTD_initCStream_usingCDict() for a compression which requires a dictionary (experimental section)
+*
+*  Use ZSTD_compressStream() repetitively to consume input stream.
+*  The function will automatically update both `pos` fields.
+*  Note that it may not consume the entire input, in which case `pos < size`,
+*  and it's up to the caller to present the remaining data again.
+*  @return : a size hint, preferred nb of bytes to use as input for next function call
+*            or an error code, which can be tested using ZSTD_isError().
+*            Note 1 : it's just a hint, to help latency a little, any other value will work fine.
+*            Note 2 : size hint is guaranteed to be <= ZSTD_CStreamInSize()
+*
+*  At any moment, it's possible to flush whatever data remains within internal buffer, using ZSTD_flushStream().
+*  `output->pos` will be updated.
+*  Note that some content might still be left within internal buffer if `output->size` is too small.
+*  @return : nb of bytes still present within internal buffer (0 if it's empty)
+*            or an error code, which can be tested using ZSTD_isError().
+*
+*  ZSTD_endStream() instructs to finish a frame.
+*  It will perform a flush and write frame epilogue.
+*  The epilogue is required for decoders to consider a frame completed.
+*  ZSTD_endStream() may not be able to flush full data if `output->size` is too small.
+*  In which case, call ZSTD_endStream() again to complete the flush.
+*  @return : 0 if frame fully completed and fully flushed,
+             or >0 if some data is still present within internal buffer
+                  (value is minimum size estimation for remaining data to flush, but it could be more)
+*            or an error code, which can be tested using ZSTD_isError().
+*
+* *******************************************************************/
+
+typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are now effectively same object (>= v1.3.0) */
+                                 /* Continue to distinguish them for compatibility with versions <= v1.2.0 */
+/*===== ZSTD_CStream management functions =====*/
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
+ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
+
+/*===== Streaming compression functions =====*/
+ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
+
+ZSTDLIB_API size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guaranteed to successfully flush at least one complete compressed block in all circumstances. */
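+
+/* Illustrative sketch (not part of the upstream documentation) : a minimal
+ * compression loop following the HowTo above; `outBuff` is an assumed buffer
+ * of ZSTD_CStreamOutSize() bytes, drained (written out) after each call :
+ *
+ *     ZSTD_CStream* const zcs = ZSTD_createCStream();
+ *     ZSTD_initCStream(zcs, 3);
+ *     ...   for each input chunk :   ...
+ *         ZSTD_inBuffer in = { chunk, chunkSize, 0 };
+ *         while (in.pos < in.size) {
+ *             ZSTD_outBuffer out = { outBuff, ZSTD_CStreamOutSize(), 0 };
+ *             size_t const hint = ZSTD_compressStream(zcs, &out, &in);
+ *             ...   check ZSTD_isError(hint); write out.pos bytes   ...
+ *         }
+ *     ...   then finish the frame :   ...
+ *     {   ZSTD_outBuffer out = { outBuff, ZSTD_CStreamOutSize(), 0 };
+ *         while (ZSTD_endStream(zcs, &out) != 0) { ... drain, then reset out.pos to 0 ... }
+ *     }
+ *     ZSTD_freeCStream(zcs);
+ */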
+
+
+
+/*-***************************************************************************
+*  Streaming decompression - HowTo
+*
+*  A ZSTD_DStream object is required to track streaming operations.
+*  Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources.
+*  ZSTD_DStream objects can be re-used multiple times.
+*
+*  Use ZSTD_initDStream() to start a new decompression operation,
+*   or ZSTD_initDStream_usingDict() if decompression requires a dictionary.
+*   @return : recommended first input size
+*
+*  Use ZSTD_decompressStream() repetitively to consume your input.
+*  The function will update both `pos` fields.
+*  If `input.pos < input.size`, some input has not been consumed.
+*  It's up to the caller to present the remaining data again.
+*  If `output.pos < output.size`, decoder has flushed everything it could.
+*  @return : 0 when a frame is completely decoded and fully flushed,
+*            an error code, which can be tested using ZSTD_isError(),
+*            any other value > 0, which means there is still some decoding to do to complete current frame.
+*            The return value is a suggested next input size (a hint to improve latency) that will never load more than the current frame.
+* *******************************************************************************/
+
+typedef ZSTD_DCtx ZSTD_DStream;  /**< DCtx and DStream are now effectively same object (>= v1.3.0) */
+                                 /* For compatibility with versions <= v1.2.0, continue to consider them separated. */
+/*===== ZSTD_DStream management functions =====*/
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
+ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
+
+/*===== Streaming decompression functions =====*/
+ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
+ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guaranteed to successfully flush at least one complete block in all circumstances. */
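+
+/* Illustrative sketch (not part of the upstream documentation) : a minimal
+ * decompression loop following the HowTo above; `outBuff` is an assumed
+ * buffer of ZSTD_DStreamOutSize() bytes, consumed after each call :
+ *
+ *     ZSTD_DStream* const zds = ZSTD_createDStream();
+ *     ZSTD_initDStream(zds);
+ *     ...   for each compressed chunk :   ...
+ *         ZSTD_inBuffer in = { chunk, chunkSize, 0 };
+ *         while (in.pos < in.size) {
+ *             ZSTD_outBuffer out = { outBuff, ZSTD_DStreamOutSize(), 0 };
+ *             size_t const ret = ZSTD_decompressStream(zds, &out, &in);
+ *             ...   check ZSTD_isError(ret); consume out.pos bytes; ret==0 means frame done   ...
+ *         }
+ *     ZSTD_freeDStream(zds);
+ */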
+
+#endif  /* ZSTD_H_235446 */
+
+
+
+/****************************************************************************************
+ * START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as prototypes may change in the future.
+ * They are provided for advanced scenarios.
+ * Use them only in association with static linking.
+ * ***************************************************************************************/
+
+#if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY)
+#define ZSTD_H_ZSTD_STATIC_LINKING_ONLY
+
+/* --- Constants ---*/
+#define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
+#define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
+#define ZSTD_MAGIC_DICTIONARY       0xEC30A437   /* >= v0.7.0 */
+
+#define ZSTD_WINDOWLOG_MAX_32   30
+#define ZSTD_WINDOWLOG_MAX_64   31
+#define ZSTD_WINDOWLOG_MAX    ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
+#define ZSTD_WINDOWLOG_MIN      10
+#define ZSTD_HASHLOG_MAX      ((ZSTD_WINDOWLOG_MAX < 30) ? ZSTD_WINDOWLOG_MAX : 30)
+#define ZSTD_HASHLOG_MIN         6
+#define ZSTD_CHAINLOG_MAX_32    29
+#define ZSTD_CHAINLOG_MAX_64    30
+#define ZSTD_CHAINLOG_MAX     ((unsigned)(sizeof(size_t) == 4 ? ZSTD_CHAINLOG_MAX_32 : ZSTD_CHAINLOG_MAX_64))
+#define ZSTD_CHAINLOG_MIN       ZSTD_HASHLOG_MIN
+#define ZSTD_HASHLOG3_MAX       17
+#define ZSTD_SEARCHLOG_MAX     (ZSTD_WINDOWLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN       1
+#define ZSTD_SEARCHLENGTH_MAX    7   /* only for ZSTD_fast, other strategies are limited to 6 */
+#define ZSTD_SEARCHLENGTH_MIN    3   /* only for ZSTD_btopt, other strategies are limited to 4 */
+#define ZSTD_TARGETLENGTH_MIN    1   /* only used by btopt, btultra and btfast */
+#define ZSTD_LDM_MINMATCH_MIN    4
+#define ZSTD_LDM_MINMATCH_MAX 4096
+#define ZSTD_LDM_BUCKETSIZELOG_MAX 8
+
+#define ZSTD_FRAMEHEADERSIZE_PREFIX 5   /* minimum input size to know frame header size */
+#define ZSTD_FRAMEHEADERSIZE_MIN    6
+#define ZSTD_FRAMEHEADERSIZE_MAX   18   /* for static allocation */
+static const size_t ZSTD_frameHeaderSize_prefix = ZSTD_FRAMEHEADERSIZE_PREFIX;
+static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN;
+static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX;
+static const size_t ZSTD_skippableHeaderSize = 8;  /* magic number + skippable frame length */
+
+
+/*--- Advanced types ---*/
+typedef enum { ZSTD_fast=1, ZSTD_dfast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2,
+               ZSTD_btlazy2, ZSTD_btopt, ZSTD_btultra } ZSTD_strategy;   /* from faster to stronger */
+
+typedef struct {
+    unsigned windowLog;      /**< largest match distance : larger == more compression, more memory needed during decompression */
+    unsigned chainLog;       /**< fully searched segment : larger == more compression, slower, more memory (useless for fast) */
+    unsigned hashLog;        /**< dispatch table : larger == faster, more memory */
+    unsigned searchLog;      /**< nb of searches : larger == more compression, slower */
+    unsigned searchLength;   /**< match length searched : larger == faster decompression, sometimes less compression */
+    unsigned targetLength;   /**< acceptable match size for optimal parser (only) : larger == more compression, slower */
+    ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+typedef struct {
+    unsigned contentSizeFlag; /**< 1: content size will be in frame header (when known) */
+    unsigned checksumFlag;    /**< 1: generate a 32-bits checksum at end of frame, for error detection */
+    unsigned noDictIDFlag;    /**< 1: no dictID will be saved into frame header (if dictionary compression) */
+} ZSTD_frameParameters;
+
+typedef struct {
+    ZSTD_compressionParameters cParams;
+    ZSTD_frameParameters fParams;
+} ZSTD_parameters;
+
+typedef struct ZSTD_CCtx_params_s ZSTD_CCtx_params;
+
+typedef enum {
+    ZSTD_dct_auto=0,      /* dictionary is "full" when starting with ZSTD_MAGIC_DICTIONARY, otherwise it is "rawContent" */
+    ZSTD_dct_rawContent,  /* ensures dictionary is always loaded as rawContent, even if it starts with ZSTD_MAGIC_DICTIONARY */
+    ZSTD_dct_fullDict     /* refuses to load a dictionary if it does not respect Zstandard's specification */
+} ZSTD_dictContentType_e;
+
+typedef enum {
+    ZSTD_dlm_byCopy = 0, /**< Copy dictionary content internally */
+    ZSTD_dlm_byRef,      /**< Reference dictionary content -- the dictionary buffer must outlive its users. */
+} ZSTD_dictLoadMethod_e;
+
+
+
+/***************************************
+*  Frame size functions
+***************************************/
+
+/*! ZSTD_findFrameCompressedSize() :
+ *  `src` should point to the start of a ZSTD encoded frame or skippable frame
+ *  `srcSize` must be >= first frame size
+ *  @return : the compressed size of the first frame starting at `src`,
+ *            suitable to pass to `ZSTD_decompress` or similar,
+ *            or an error code if input is invalid */
+ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTD_findDecompressedSize() :
+ *  `src` should point the start of a series of ZSTD encoded and/or skippable frames
+ *  `srcSize` must be the _exact_ size of this series
+ *       (i.e. there should be a frame boundary exactly at `srcSize` bytes after `src`)
+ *  @return : - decompressed size of all data in all successive frames
+ *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
+ *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
+ *
+ *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
+ *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
+ *            In which case, it's necessary to use streaming mode to decompress data.
+ *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
+ *   note 3 : decompressed size can be very large (64-bit value),
+ *            potentially larger than what the local system can handle as a single memory segment,
+ *            in which case it's necessary to use streaming mode to decompress the data.
+ *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
+ *            Always ensure result fits within application's authorized limits.
+ *            Each application can set its own limits.
+ *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
+ *            read each contained frame header.  This is fast, as most of the data is skipped;
+ *            however, it does mean that all frame data must be present and valid. */
+ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
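+
+/* Example (illustrative sketch, not part of the API) : validating an
+ * announced decompressed size from an untrusted source before allocating,
+ * as recommended by note 4 above. `MY_MAX_OUTPUT` is a hypothetical
+ * application-chosen limit.
+ *
+ *     unsigned long long const rSize = ZSTD_findDecompressedSize(src, srcSize);
+ *     if (rSize == ZSTD_CONTENTSIZE_ERROR)   { ... invalid input ... }
+ *     if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) { ... fall back to streaming ... }
+ *     if (rSize > MY_MAX_OUTPUT)             { ... refuse oversized frame ... }
+ *     void* const dst = malloc((size_t)rSize);
+ */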
+
+/*! ZSTD_frameHeaderSize() :
+*   `src` should point to the start of a ZSTD frame
+*   `srcSize` must be >= ZSTD_frameHeaderSize_prefix.
+*   @return : size of the Frame Header */
+ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
+
+
+/***************************************
+*  Memory management
+***************************************/
+
+/*! ZSTD_sizeof_*() :
+ *  These functions give the current memory usage of the selected object.
+ *  Object memory usage can evolve when re-used. */
+ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
+ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
+ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
+ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_estimate*() :
+ *  These functions make it possible to estimate memory usage
+ *  of a future {D,C}Ctx, before its creation.
+ *  ZSTD_estimateCCtxSize() will provide a budget large enough for any compression level up to the selected one.
+ *  It will also consider src size to be arbitrarily "large", which is worst case.
+ *  If srcSize is known to always be small, ZSTD_estimateCCtxSize_usingCParams() can provide a tighter estimation.
+ *  ZSTD_estimateCCtxSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ *  ZSTD_estimateCCtxSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
+ *  Note : CCtx size estimation is only correct for single-threaded compression. */
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
+
+/*! ZSTD_estimateCStreamSize() :
+ *  ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to the selected one.
+ *  It will also consider src size to be arbitrarily "large", which is the worst case.
+ *  If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.
+ *  ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.
+ *  ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParam_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_p_nbWorkers is >= 1.
+ *  Note : CStream size estimation is only correct for single-threaded compression.
+ *  ZSTD_DStream memory budget depends on window size.
+ *  This information can be passed manually, using ZSTD_estimateDStreamSize(),
+ *  or deduced from a valid frame header, using ZSTD_estimateDStreamSize_fromFrame().
+ *  Note : if streaming is initialized with a ZSTD_init?Stream_usingDict() function,
+ *         an internal ?Dict will be created, whose additional size is not estimated here.
+ *         In this case, get the total size by adding ZSTD_estimate?DictSize */
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
+ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
+ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
+
+/*! ZSTD_estimate?DictSize() :
+ *  ZSTD_estimateCDictSize() assumes that the dictionary is relatively "small", and that its content is copied, like ZSTD_createCDict().
+ *  ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced().
+ *  Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller.
+ */
+ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
+ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
+
+/*! ZSTD_initStatic*() :
+ *  Initialize an object using a pre-allocated fixed-size buffer.
+ *  workspace: The memory area to emplace the object into.
+ *             Provided pointer *must be 8-byte aligned*.
+ *             Buffer must outlive object.
+ *  workspaceSize: Use ZSTD_estimate*Size() to determine
+ *                 how large workspace must be to support target scenario.
+ * @return : pointer to object (same address as workspace, just different type),
+ *           or NULL if error (size too small, incorrect alignment, etc.)
+ *  Note : zstd will never resize nor malloc() when using a static buffer.
+ *         If the object requires more memory than available,
+ *         zstd will just error out (typically ZSTD_error_memory_allocation).
+ *  Note 2 : there is no corresponding "free" function.
+ *           Since workspace is allocated externally, it must be freed externally too.
+ *  Note 3 : cParams : use ZSTD_getCParams() to convert a compression level
+ *           into its associated cParams.
+ *  Limitation 1 : currently not compatible with internal dictionary creation, triggered by
+ *                 ZSTD_CCtx_loadDictionary(), ZSTD_initCStream_usingDict() or ZSTD_initDStream_usingDict().
+ *  Limitation 2 : static cctx currently not compatible with multi-threading.
+ *  Limitation 3 : static dctx is incompatible with legacy support.
+ */
+ZSTDLIB_API ZSTD_CCtx*    ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticCCtx() */
+
+ZSTDLIB_API ZSTD_DCtx*    ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
+ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize);    /**< same as ZSTD_initStaticDCtx() */
+
+ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict(
+                                        void* workspace, size_t workspaceSize,
+                                        const void* dict, size_t dictSize,
+                                        ZSTD_dictLoadMethod_e dictLoadMethod,
+                                        ZSTD_dictContentType_e dictContentType,
+                                        ZSTD_compressionParameters cParams);
+
+ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict(
+                                        void* workspace, size_t workspaceSize,
+                                        const void* dict, size_t dictSize,
+                                        ZSTD_dictLoadMethod_e dictLoadMethod,
+                                        ZSTD_dictContentType_e dictContentType);
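+
+/* Example (illustrative sketch) : pairing ZSTD_estimate*Size() with
+ * ZSTD_initStatic*() to compress without any dynamic allocation.
+ * Error handling is omitted for brevity; the workspace below is assumed
+ * to be 8-byte aligned.
+ *
+ *     size_t const wkspSize = ZSTD_estimateCCtxSize(level);
+ *     void* const wksp = ... ;   // pre-allocated buffer of wkspSize bytes
+ *     ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
+ *     if (cctx == NULL) { ... workspace too small or misaligned ... }
+ *     size_t const cSize = ZSTD_compressCCtx(cctx, dst, dstCapacity,
+ *                                            src, srcSize, level);
+ */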
+
+/*! Custom memory allocation :
+ *  These prototypes make it possible to pass your own allocation/free functions.
+ *  ZSTD_customMem is provided at creation time, using ZSTD_create*_advanced() variants listed below.
+ *  All allocation/free operations will be completed using these custom variants instead of regular <stdlib.h> ones.
+ */
+typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
+typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
+static ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL };  /**< this constant defers to stdlib's functions */
+
+ZSTDLIB_API ZSTD_CCtx*    ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DCtx*    ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
+ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize,
+                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
+                                                  ZSTD_dictContentType_e dictContentType,
+                                                  ZSTD_compressionParameters cParams,
+                                                  ZSTD_customMem customMem);
+
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                                  ZSTD_dictLoadMethod_e dictLoadMethod,
+                                                  ZSTD_dictContentType_e dictContentType,
+                                                  ZSTD_customMem customMem);
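+
+/* Example (illustrative sketch) : routing all zstd allocations through
+ * application callbacks. `myAlloc` / `myFree` are hypothetical names;
+ * `opaque` is passed back verbatim on every call.
+ *
+ *     static void* myAlloc(void* opaque, size_t size)   { return malloc(size); }
+ *     static void  myFree (void* opaque, void* address) { free(address); }
+ *
+ *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
+ */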
+
+
+
+/***************************************
+*  Advanced compression functions
+***************************************/
+
+/*! ZSTD_createCDict_byReference() :
+ *  Create a digested dictionary for compression
+ *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives the CDict: it must remain read-accessible throughout the lifetime of the CDict */
+ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
+
+/*! ZSTD_getCParams() :
+*   @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
+*   `estimatedSrcSize` value is optional, select 0 if not known */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_getParams() :
+*   same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`.
+*   All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */
+ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
+
+/*! ZSTD_checkCParams() :
+*   Ensure param values remain within authorized range */
+ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
+
+/*! ZSTD_adjustCParams() :
+ *  optimize params for a given `srcSize` and `dictSize`.
+ *  both values are optional, select `0` if unknown. */
+ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
+
+/*! ZSTD_compress_advanced() :
+*   Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter */
+ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
+                                  void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                            const void* dict,size_t dictSize,
+                                  ZSTD_parameters params);
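+
+/* Example (illustrative sketch) : deriving parameters from a compression
+ * level, tweaking one frame parameter, then compressing with full control.
+ * Error handling omitted for brevity.
+ *
+ *     ZSTD_parameters params = ZSTD_getParams(level, srcSize, 0);
+ *     params.fParams.checksumFlag = 1;   // request a content checksum
+ *     params.cParams = ZSTD_adjustCParams(params.cParams, srcSize, 0);
+ *     size_t const cSize = ZSTD_compress_advanced(cctx, dst, dstCapacity,
+ *                                                 src, srcSize,
+ *                                                 NULL, 0,   // no dictionary
+ *                                                 params);
+ */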
+
+/*! ZSTD_compress_usingCDict_advanced() :
+*   Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
+ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+                                  void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                            const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
+
+
+/*--- Advanced decompression functions ---*/
+
+/*! ZSTD_isFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ *  Note 3 : Skippable Frame Identifiers are considered valid. */
+ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, ready to start decompression operation without startup delay.
+ *  Dictionary content is referenced, and therefore stays in dictBuffer.
+ *  It is important that dictBuffer outlives DDict,
+ *  it must remain read accessible throughout the lifetime of DDict */
+ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
+
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within dictionary.
+ *  if @return == 0, the dictionary is not conformant with Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary to be decoded (most common case).
+ *  - The frame was built with the dictID intentionally removed. Whatever dictionary is necessary is hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, use ZSTD_getFrameHeader(), which will provide a more precise error code. */
+ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
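+
+/* Example (illustrative sketch) : selecting a decompression dictionary
+ * based on the frame's dictID. `lookupDDict` is a hypothetical
+ * application-side lookup, not part of this API.
+ *
+ *     unsigned const dictID = ZSTD_getDictID_fromFrame(src, srcSize);
+ *     const ZSTD_DDict* const ddict = dictID ? lookupDDict(dictID) : NULL;
+ */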
+
+
+/********************************************************************
+*  Advanced streaming functions
+********************************************************************/
+
+/*=====   Advanced Streaming compression functions  =====*/
+ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct. If it is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, "0" also disables frame content size field. It may be enabled in the future. */
+ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< creates an internal CDict (incompatible with static CCtx), except if dict == NULL or dictSize < 8, in which case no dict is used. Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy.*/
+ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
+                                             ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
+ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters. pledgedSrcSize must be correct. If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. */
+
+/*! ZSTD_resetCStream() :
+ *  start a new compression job, using the same parameters as the previous job.
+ *  This is typically useful to skip the dictionary loading stage, since the dictionary is re-used in-place.
+ *  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
+ *  If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.
+ *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
+ *  For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs,
+ *  but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
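+
+/* Example (illustrative sketch) : initializing a CStream once with a
+ * dictionary, then resetting it for each following frame, so the
+ * dictionary loading cost is paid only once.
+ *
+ *     ZSTD_initCStream_usingDict(zcs, dict, dictSize, level);
+ *     for (each frame) {
+ *         ZSTD_resetCStream(zcs, ZSTD_CONTENTSIZE_UNKNOWN);
+ *         ... ZSTD_compressStream() / ZSTD_endStream() loop ...
+ *     }
+ */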
+
+
+typedef struct {
+    unsigned long long ingested;
+    unsigned long long consumed;
+    unsigned long long produced;
+} ZSTD_frameProgression;
+
+/* ZSTD_getFrameProgression():
+ * tells how much data has been ingested (read from input),
+ * consumed (input actually compressed) and produced (output) for the current frame.
+ * Therefore, (ingested - consumed) is the amount of input data buffered internally, not yet compressed.
+ * Can report progression inside worker threads (multi-threading and non-blocking mode).
+ */
+ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx);
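+
+/* Example (illustrative sketch) : polling progress from the thread driving
+ * compression, e.g. between two ZSTD_compress_generic() invocations.
+ *
+ *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *     unsigned long long const buffered = fp.ingested - fp.consumed;
+ */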
+
+
+
+/*=====   Advanced Streaming decompression functions  =====*/
+typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
+ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);   /* obsolete : this API will be removed in a future version */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: no dictionary will be used if dict == NULL or dictSize < 8 */
+ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict is referenced, it must outlive decompression session */
+ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
+
+
+/*********************************************************************
+*  Buffer-less and synchronous inner streaming functions
+*
+*  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
+*  But it's also a complex one, with several restrictions, documented below.
+*  Prefer normal streaming API for an easier experience.
+********************************************************************* */
+
+/**
+  Buffer-less streaming compression (synchronous mode)
+
+  A ZSTD_CCtx object is required to track streaming operations.
+  Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
+  ZSTD_CCtx object can be re-used multiple times within successive compression operations.
+
+  Start by initializing a context.
+  Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression,
+  or ZSTD_compressBegin_advanced(), for finer parameter control.
+  It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
+
+  Then, consume your input using ZSTD_compressContinue().
+  There are some important considerations to keep in mind when using this advanced function :
+  - ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
+  - Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
+  - Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
+    Worst case evaluation is provided by ZSTD_compressBound().
+    ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
+  - ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
+    It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
+  - ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps,
+    in which case it will "discard" the relevant memory section from its history.
+
+  Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
+  It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
+  Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
+
+  `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
+*/
+
+/*=====   Buffer-less streaming compression functions  =====*/
+ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
+ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
+ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
+ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
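+
+/* Example (illustrative sketch) : buffer-less compression of two chunks,
+ * following the documentation above. Each return value can be an error
+ * code (test with ZSTD_isError()); checks are omitted for brevity.
+ *
+ *     ZSTD_compressBegin(cctx, level);
+ *     size_t pos = 0;
+ *     pos += ZSTD_compressContinue(cctx, dst + pos, dstCapacity - pos,
+ *                                  chunk1, chunk1Size);
+ *     pos += ZSTD_compressEnd(cctx, dst + pos, dstCapacity - pos,
+ *                             chunk2, chunk2Size);  // writes last block + checksum
+ */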
+
+
+/*-
+  Buffer-less streaming decompression (synchronous mode)
+
+  A ZSTD_DCtx object is required to track streaming operations.
+  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+  A ZSTD_DCtx object can be re-used multiple times.
+
+  First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
+  Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
+  Data fragment must be large enough to ensure successful decoding.
+ `ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
+  @result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
+           >0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
+           or an error code, which can be tested using ZSTD_isError().
+
+  It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
+  such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
+  Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
+  As a consequence, check that values remain within valid application range.
+  For example, do not allocate memory blindly, check that `windowSize` is within expectation.
+  Each application can set its own limits, depending on local restrictions.
+  For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
+
+  ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
+  ZSTD_decompressContinue() is very sensitive to contiguity,
+  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+  or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
+  There are multiple ways to guarantee this condition.
+
+  The most memory-efficient way is to use a round buffer of sufficient size.
+  Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
+  which can @return an error code if the required value is too large for the current system (in 32-bit mode).
+  In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
+  up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
+  whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
+  At that point, decoding can resume from the beginning of the buffer.
+  Note that already decoded data stored in the buffer should be flushed before being overwritten.
+
+  There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
+
+  Finally, if you control the compression process, you can also ignore all buffer size rules,
+  as long as the encoder and decoder progress in "lock-step",
+  aka use exactly the same buffer sizes, break contiguity at the same place, etc.
+
+  Once buffers are set up, start decompression with ZSTD_decompressBegin().
+  If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
+
+  Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
+
+ @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+  It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
+  It can also be an error code, which can be tested with ZSTD_isError().
+
+  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+
+  Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
+  This information is not required to properly decode a frame.
+
+  == Special case : skippable frames ==
+
+  Skippable frames allow integration of user-defined data into a flow of concatenated frames.
+  Skippable frames will be ignored (skipped) by the decompressor.
+  The format of skippable frames is as follows :
+  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+  c) Frame Content - any content (User Data) of length equal to Frame Size
+  For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
+  For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
+*/
+
+/*=====   Buffer-less streaming decompression functions  =====*/
+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
+typedef struct {
+    unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
+    unsigned long long windowSize;       /* can be very large, up to <= frameContentSize */
+    unsigned blockSizeMax;
+    ZSTD_frameType_e frameType;          /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
+    unsigned headerSize;
+    unsigned dictID;
+    unsigned checksumFlag;
+} ZSTD_frameHeader;
+ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize);  /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
+
+ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
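+
+/* Example (illustrative sketch) : buffer-less decompression driver loop,
+ * following the documentation above. `src` and `dst` are char* here so
+ * that pointer arithmetic is valid; error handling is abbreviated.
+ *
+ *     ZSTD_decompressBegin(dctx);
+ *     size_t srcPos = 0, dstPos = 0, next;
+ *     while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
+ *         size_t const ret = ZSTD_decompressContinue(dctx,
+ *                                 dst + dstPos, dstCapacity - dstPos,
+ *                                 src + srcPos, next);  // exactly `next` bytes
+ *         if (ZSTD_isError(ret)) { ... }
+ *         srcPos += next;
+ *         dstPos += ret;  // ret can be 0 when a metadata item was decoded
+ *     }
+ */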
+
+/* misc */
+ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
+typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
+ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
+
+
+
+/* ============================================ */
+/**       New advanced API (experimental)       */
+/* ============================================ */
+
+/* notes on API design :
+ *   In this proposal, parameters are pushed one by one into an existing context,
+ *   and then applied on all subsequent compression jobs.
+ *   When no parameter is ever provided, CCtx is created with compression level ZSTD_CLEVEL_DEFAULT.
+ *
+ *   This API is intended to replace all other advanced / experimental API entry points.
+ *   But it stands a reasonable chance to become "stable", after a reasonable testing period.
+ */
+
+/* note on naming convention :
+ *   Initially, the API favored names like ZSTD_setCCtxParameter() .
+ *   In this proposal, convention is changed towards ZSTD_CCtx_setParameter() .
+ *   The main driver is that it identifies more clearly the target object type.
+ *   It feels clearer when considering multiple targets :
+ *   ZSTD_CDict_setParameter() (rather than ZSTD_setCDictParameter())
+ *   ZSTD_CCtxParams_setParameter()  (rather than ZSTD_setCCtxParamsParameter() )
+ *   etc...
+ */
+
+/* note on enum design :
+ * All enum will be pinned to explicit values before reaching "stable API" status */
+
+typedef enum {
+    /* Open question : should we have a format ZSTD_f_auto ?
+     * Today, it would mean exactly the same as ZSTD_f_zstd1.
+     * But, in the future, should several formats become supported,
+     * on the compression side, it would mean "default format".
+     * On the decompression side, it would mean "automatic format detection",
+     * so that ZSTD_f_zstd1 would mean "accept *only* zstd frames".
+     * Since meaning is a little different, another option could be to define different enums for compression and decompression.
+     * This question could be kept for later, when there are actually multiple formats to support,
+     * but there is also the question of pinning enum values, and pinning value `0` is especially important */
+    ZSTD_f_zstd1 = 0,        /* zstd frame format, specified in zstd_compression_format.md (default) */
+    ZSTD_f_zstd1_magicless,  /* Variant of zstd frame format, without initial 4-byte magic number.
+                              * Useful to save 4 bytes per generated frame.
+                              * The decoder cannot automatically recognise this format; it requires explicit instruction (see ZSTD_DCtx_setFormat()). */
+} ZSTD_format_e;
+
+typedef enum {
+    /* compression format */
+    ZSTD_p_format = 10,      /* See ZSTD_format_e enum definition.
+                              * Cast selected format as unsigned for ZSTD_CCtx_setParameter() compatibility. */
+
+    /* compression parameters */
+    ZSTD_p_compressionLevel=100, /* Update all compression parameters according to pre-defined cLevel table
+                              * Default level is ZSTD_CLEVEL_DEFAULT==3.
+                              * Special: value 0 means "do not change cLevel".
+                              * Note 1 : it's possible to pass a negative compression level by casting it to unsigned type.
+                              * Note 2 : setting a level sets all default values of other compression parameters.
+                              * Note 3 : setting compressionLevel automatically updates ZSTD_p_compressLiterals. */
+    ZSTD_p_windowLog,        /* Maximum allowed back-reference distance, expressed as power of 2.
+                              * Must be clamped between ZSTD_WINDOWLOG_MIN and ZSTD_WINDOWLOG_MAX.
+                              * Special: value 0 means "use default windowLog".
+                              * Note: Using a window size greater than ZSTD_MAXWINDOWSIZE_DEFAULT (default: 2^27)
+                              *       requires explicitly allowing such window size during decompression stage. */
+    ZSTD_p_hashLog,          /* Size of the probe table, as a power of 2.
+                              * Resulting table size is (1 << (hashLog+2)).
+                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX.
+                              * Larger tables improve compression ratio of strategies <= dFast,
+                              * and improve speed of strategies > dFast.
+                              * Special: value 0 means "use default hashLog". */
+    ZSTD_p_chainLog,         /* Size of the full-search table, as a power of 2.
+                              * Resulting table size is (1 << (chainLog+2)).
+                              * Larger tables result in better and slower compression.
+                              * This parameter is useless when using "fast" strategy.
+                              * Special: value 0 means "use default chainLog". */
+    ZSTD_p_searchLog,        /* Number of search attempts, as a power of 2.
+                              * More attempts result in better and slower compression.
+                              * This parameter is useless when using "fast" and "dFast" strategies.
+                              * Special: value 0 means "use default searchLog". */
+    ZSTD_p_minMatch,         /* Minimum size of searched matches (note : repCode matches can be smaller).
+                              * Larger values make faster compression and decompression, but decrease ratio.
+                              * Must be clamped between ZSTD_SEARCHLENGTH_MIN and ZSTD_SEARCHLENGTH_MAX.
+                              * Note that currently, for all strategies < btopt, the effective minimum is 4,
+                              * and for all strategies > fast, the effective maximum is 6.
+                              * Special: value 0 means "use default minMatchLength". */
+    ZSTD_p_targetLength,     /* Impact of this field depends on strategy.
+                              * For strategies btopt & btultra:
+                              *     Length of Match considered "good enough" to stop search.
+                              *     Larger values make compression stronger, and slower.
+                              * For strategy fast:
+                              *     Distance between match sampling.
+                              *     Larger values make compression faster, and weaker.
+                              * Special: value 0 means "use default targetLength". */
+    ZSTD_p_compressionStrategy, /* See ZSTD_strategy enum definition.
+                              * Cast selected strategy as unsigned for ZSTD_CCtx_setParameter() compatibility.
+                              * The higher the value of selected strategy, the more complex it is,
+                              * resulting in stronger and slower compression.
+                              * Special: value 0 means "use default strategy". */
+
+    ZSTD_p_enableLongDistanceMatching=160, /* Enable long distance matching.
+                                         * This parameter is designed to improve compression ratio
+                                         * for large inputs, by finding large matches at long distance.
+                                         * It increases memory usage and window size.
+                                         * Note: enabling this parameter increases ZSTD_p_windowLog to 128 MB
+                                         * except when expressly set to a different value. */
+    ZSTD_p_ldmHashLog,       /* Size of the table for long distance matching, as a power of 2.
+                              * Larger values increase memory usage and compression ratio,
+                              * but decrease compression speed.
+                              * Must be clamped between ZSTD_HASHLOG_MIN and ZSTD_HASHLOG_MAX
+                              * default: windowlog - 7.
+                              * Special: value 0 means "automatically determine hashlog". */
+    ZSTD_p_ldmMinMatch,      /* Minimum match size for long distance matcher.
+                              * Larger/too small values usually decrease compression ratio.
+                              * Must be clamped between ZSTD_LDM_MINMATCH_MIN and ZSTD_LDM_MINMATCH_MAX.
+                              * Special: value 0 means "use default value" (default: 64). */
+    ZSTD_p_ldmBucketSizeLog, /* Log size of each bucket in the LDM hash table for collision resolution.
+                              * Larger values improve collision resolution but decrease compression speed.
+                              * The maximum value is ZSTD_LDM_BUCKETSIZELOG_MAX .
+                              * Special: value 0 means "use default value" (default: 3). */
+    ZSTD_p_ldmHashEveryLog,  /* Frequency of inserting/looking up entries in the LDM hash table.
+                              * Must be clamped between 0 and (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN).
+                              * Default is MAX(0, (windowLog - ldmHashLog)), optimizing hash table usage.
+                              * Larger values improve compression speed.
+                              * Deviating far from default value will likely result in a compression ratio decrease.
+                              * Special: value 0 means "automatically determine hashEveryLog". */
+
+    /* frame parameters */
+    ZSTD_p_contentSizeFlag=200, /* Content size will be written into frame header _whenever known_ (default:1)
+                              * Content size must be known at the beginning of compression;
+                              * it is provided using ZSTD_CCtx_setPledgedSrcSize() */
+    ZSTD_p_checksumFlag,     /* A 32-bit checksum of content is written at end of frame (default:0) */
+    ZSTD_p_dictIDFlag,       /* When applicable, dictionary's ID is written into frame header (default:1) */
+
+    /* multi-threading parameters */
+    /* These parameters are only useful if multi-threading is enabled (ZSTD_MULTITHREAD).
+     * They return an error otherwise. */
+    ZSTD_p_nbWorkers=400,    /* Select how many threads will be spawned to compress in parallel.
+                              * When nbWorkers >= 1, triggers asynchronous mode :
+                              * ZSTD_compress_generic() consumes some input, flush some output if possible, and immediately gives back control to caller,
+                              * while compression work is performed in parallel, within worker threads.
+                              * (note : a strong exception to this rule is when first invocation sets ZSTD_e_end : it becomes a blocking call).
+                              * More workers improve speed, but also increase memory usage.
+                              * Default value is `0`, aka "single-threaded mode" : no worker is spawned, compression is performed inside Caller's thread, all invocations are blocking */
+    ZSTD_p_jobSize,          /* Size of a compression job. This value is enforced only in non-blocking mode.
+                              * Each compression job is completed in parallel, so this value indirectly controls the nb of active threads.
+                              * 0 means default, which is dynamically determined based on compression parameters.
+                              * Job size must be a minimum of overlapSize, or 1 MB, whichever is largest.
+                              * The minimum size is automatically and transparently enforced */
+    ZSTD_p_overlapSizeLog,   /* Size of previous input reloaded at the beginning of each job.
+                              * 0 => no overlap, 6(default) => use 1/8th of windowSize, >=9 => use full windowSize */
+
+    /* =================================================================== */
+    /* experimental parameters - no stability guaranteed                   */
+    /* =================================================================== */
+
+    ZSTD_p_compressLiterals=1000, /* controls Huffman compression of literals (enabled by default).
+                              * disabling it improves speed and decreases compression ratio by a large amount.
+                              * note : this setting is automatically updated when changing compression level.
+                              *        positive compression levels set ZSTD_p_compressLiterals to 1.
+                              *        negative compression levels set ZSTD_p_compressLiterals to 0. */
+
+    ZSTD_p_forceMaxWindow=1100, /* Force back-reference distances to remain < windowSize,
+                              * even when referencing into Dictionary content (default:0) */
+
+} ZSTD_cParameter;
+
+
+/*! ZSTD_CCtx_setParameter() :
+ *  Set one compression parameter, selected by enum ZSTD_cParameter.
+ *  Setting a parameter is generally only possible during frame initialization (before starting compression),
+ *  with a few exceptions that can be updated during compression: compressionLevel, hashLog, chainLog, searchLog, minMatch, targetLength and strategy.
+ *  Note : when `value` is an enum, cast it to unsigned for proper type checking.
+ *  @result : informational value (typically, the value being set, possibly clamped),
+ *            or an error code (which can be tested with ZSTD_isError()). */
+ZSTDLIB_API size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value);
+
+/*! ZSTD_CCtx_setPledgedSrcSize() :
+ *  Total input data size to be compressed as a single frame.
+ *  This value will be controlled at the end, and result in error if not respected.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Note 1 : 0 means zero, empty.
+ *           In order to mean "unknown content size", pass constant ZSTD_CONTENTSIZE_UNKNOWN.
+ *           ZSTD_CONTENTSIZE_UNKNOWN is default value for any new compression job.
+ *  Note 2 : If all data is provided and consumed in a single round,
+ *           this value is overridden by srcSize instead. */
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize);
+
+/*! ZSTD_CCtx_loadDictionary() :
+ *  Create an internal CDict from `dict` buffer.
+ *  Decompression will have to use same dictionary.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special: Adding a NULL (or 0-size) dictionary invalidates previous dictionary,
+ *           meaning "return to no-dictionary mode".
+ *  Note 1 : Dictionary will be used for all future compression jobs.
+ *           To return to "no-dictionary" situation, load a NULL dictionary.
+ *  Note 2 : Loading a dictionary involves building tables, which are dependent on compression parameters.
+ *           For this reason, compression parameters cannot be changed anymore after loading a dictionary.
+ *           It's also a CPU consuming operation, with non-negligible impact on latency.
+ *  Note 3 :`dict` content will be copied internally.
+ *           Use ZSTD_CCtx_loadDictionary_byReference() to reference dictionary content instead.
+ *           In such a case, dictionary buffer must outlive its users.
+ *  Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
+ *           to precisely select how dictionary content must be interpreted. */
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+
+/*! ZSTD_CCtx_refCDict() :
+ *  Reference a prepared dictionary, to be used for all next compression jobs.
+ *  Note that compression parameters are enforced from within CDict,
+ *  and supersede any compression parameter previously set within CCtx.
+ *  The dictionary will remain valid for future compression jobs using same CCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special : adding a NULL CDict means "return to no-dictionary mode".
+ *  Note 1 : Currently, only one dictionary can be managed.
+ *           Adding a new dictionary effectively "discards" any previous one.
+ *  Note 2 : CDict is just referenced, its lifetime must outlive CCtx. */
+ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+/*! ZSTD_CCtx_refPrefix() :
+ *  Reference a prefix (single-usage dictionary) for next compression job.
+ *  Decompression will need the same prefix to properly regenerate data.
+ *  Prefix is **only used once**. Tables are discarded at end of compression job.
+ *  Subsequent compression jobs will be done without prefix (if none is explicitly referenced).
+ *  If there is a need to use same prefix multiple times, consider embedding it into a ZSTD_CDict instead.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
+ *  Note 1 : Prefix buffer is referenced. It must outlive compression job.
+ *  Note 2 : Referencing a prefix involves building tables, which are dependent on compression parameters.
+ *           It's a CPU consuming operation, with non-negligible impact on latency.
+ *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ *           Use ZSTD_CCtx_refPrefix_advanced() to alter dictContentType. */
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
+
+/*! ZSTD_CCtx_reset() :
+ *  Return a CCtx to clean state.
+ *  Useful after an error, or to interrupt an ongoing compression job and start a new one.
+ *  Any internal data not yet flushed is cancelled.
+ *  Dictionary (if any) is dropped.
+ *  All parameters are back to default values.
+ *  It's possible to modify compression parameters after a reset.
+ */
+ZSTDLIB_API void ZSTD_CCtx_reset(ZSTD_CCtx* cctx);
+
+
+
+typedef enum {
+    ZSTD_e_continue=0, /* collect more data, encoder decides when to output compressed result, for optimal conditions */
+    ZSTD_e_flush,      /* flush any data provided so far - frame will continue, future data can still reference previous data for better compression */
+    ZSTD_e_end         /* flush any remaining data and close current frame. Any additional data starts a new frame. */
+} ZSTD_EndDirective;
+
+/*! ZSTD_compress_generic() :
+ *  Behaves about the same as ZSTD_compressStream(). To note :
+ *  - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_setParameter()
+ *  - Compression parameters cannot be changed once compression is started.
+ *  - output->pos must be <= dstCapacity, input->pos must be <= srcSize
+ *  - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.
+ *  - In single-thread mode (default), the function is blocking : it completes its job before returning to the caller.
+ *  - In multi-thread mode, the function is non-blocking : it just acquires a copy of input, and distributes jobs to internal worker threads,
+ *                                                     then immediately returns, just indicating that there is some data remaining to be flushed.
+ *                                                     The function nonetheless guarantees forward progress : it will return only after it has read or written at least one byte.
+ *  - Exception : in multi-threading mode, if the first call requests a ZSTD_e_end directive, it is blocking : it will complete compression before giving back control to the caller.
+ *  - @return provides a minimum amount of data remaining to be flushed from internal buffers
+ *            or an error code, which can be tested using ZSTD_isError().
+ *            if @return != 0, flush is not fully completed, there is still some data left within internal buffers.
+ *            This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.
+ *            For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.
+ *  - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),
+ *            only ZSTD_e_end or ZSTD_e_flush operations are allowed.
+ *            Before starting a new compression job, or changing compression parameters,
+ *            it is required to fully flush internal buffers.
+ */
+ZSTDLIB_API size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
+                                          ZSTD_outBuffer* output,
+                                          ZSTD_inBuffer* input,
+                                          ZSTD_EndDirective endOp);
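+
+/* Example (illustrative sketch) : typical use of this API, pushing
+ * parameters into the CCtx and then driving ZSTD_compress_generic()
+ * until the frame is fully flushed. setParameter error checks omitted.
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 19);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
+ *     ZSTD_inBuffer  in  = { src, srcSize, 0 };
+ *     ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *     size_t remaining;
+ *     do {
+ *         remaining = ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ *         if (ZSTD_isError(remaining)) { ... }
+ *     } while (remaining != 0);   // 0 == frame completed and fully flushed
+ */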
+
+
+/*! ZSTD_compress_generic_simpleArgs() :
+ *  Same as ZSTD_compress_generic(),
+ *  but using only integral types as arguments.
+ *  Argument list is larger than ZSTD_{in,out}Buffer,
+ *  but can be helpful for binders from dynamic languages
+ *  which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_compress_generic_simpleArgs (
+                            ZSTD_CCtx* cctx,
+                            void* dst, size_t dstCapacity, size_t* dstPos,
+                      const void* src, size_t srcSize, size_t* srcPos,
+                            ZSTD_EndDirective endOp);
+
+
+/*! ZSTD_CCtx_params :
+ *  Quick howto :
+ *  - ZSTD_createCCtxParams() : Create a ZSTD_CCtx_params structure
+ *  - ZSTD_CCtxParam_setParameter() : Push parameters one by one into
+ *                                    an existing ZSTD_CCtx_params structure.
+ *                                    This is similar to
+ *                                    ZSTD_CCtx_setParameter().
+ *  - ZSTD_CCtx_setParametersUsingCCtxParams() : Apply parameters to
+ *                                    an existing CCtx.
+ *                                    These parameters will be applied to
+ *                                    all subsequent compression jobs.
+ *  - ZSTD_compress_generic() : Do compression using the CCtx.
+ *  - ZSTD_freeCCtxParams() : Free the memory.
+ *
+ *  This can be used with ZSTD_estimateCCtxSize_usingCCtxParams()
+ *  for static allocation for single-threaded compression.
+ */
+ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
+ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params);
+
+
+/*! ZSTD_CCtxParams_reset() :
+ *  Reset params to default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params);
+
+/*! ZSTD_CCtxParams_init() :
+ *  Initializes the compression parameters of cctxParams according to
+ *  compression level. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel);
+
+/*! ZSTD_CCtxParams_init_advanced() :
+ *  Initializes the compression and frame parameters of cctxParams according to
+ *  params. All other parameters are reset to their default values.
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params);
+
+
+/*! ZSTD_CCtxParam_setParameter() :
+ *  Similar to ZSTD_CCtx_setParameter.
+ *  Set one compression parameter, selected by enum ZSTD_cParameter.
+ *  Parameters must be applied to a ZSTD_CCtx using ZSTD_CCtx_setParametersUsingCCtxParams().
+ *  Note : when `value` is an enum, cast it to unsigned for proper type checking.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_CCtxParam_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, unsigned value);
+
+/*! ZSTD_CCtx_setParametersUsingCCtxParams() :
+ *  Apply a set of ZSTD_CCtx_params to the compression context.
+ *  This can be done even after compression is started,
+ *    if nbWorkers==0, this will have no impact until a new compression is started.
+ *    if nbWorkers>=1, new parameters will be picked up at next job,
+ *       with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated).
+ */
+ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params);
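+
+/* Example (illustrative sketch) : the "quick howto" above, end to end.
+ * The applied parameters are copied into the CCtx, so the params object
+ * can typically be freed once they have been applied.
+ *
+ *     ZSTD_CCtx_params* const params = ZSTD_createCCtxParams();
+ *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_compressionLevel, 5);
+ *     ZSTD_CCtxParam_setParameter(params, ZSTD_p_checksumFlag, 1);
+ *     ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
+ *     ZSTD_freeCCtxParams(params);
+ *     ... ZSTD_compress_generic() as usual ...
+ */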
+
+
+/*===   Advanced parameters for decompression API  ===*/
+
+/* The following parameters must be set after creating a ZSTD_DCtx* (or ZSTD_DStream*) object,
+ * but before starting decompression of a frame.
+ */
+
+/*! ZSTD_DCtx_loadDictionary() :
+ *  Create an internal DDict from dict buffer,
+ *  to be used to decompress next frames.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
+ *            meaning "return to no-dictionary mode".
+ *  Note 1 : `dict` content will be copied internally.
+ *            Use ZSTD_DCtx_loadDictionary_byReference()
+ *            to reference dictionary content instead.
+ *            In which case, the dictionary buffer must outlive its users.
+ *  Note 2 : Loading a dictionary involves building tables,
+ *           which has a non-negligible impact on CPU usage and latency.
+ *  Note 3 : Use ZSTD_DCtx_loadDictionary_advanced() to select
+ *           how dictionary content will be interpreted and loaded.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
+
+
+/*! ZSTD_DCtx_refDDict() :
+ *  Reference a prepared dictionary, to be used to decompress next frames.
+ *  The dictionary remains active for decompression of future frames using same DCtx.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Note 1 : Currently, only one dictionary can be managed.
+ *           Referencing a new dictionary effectively "discards" any previous one.
+ *  Special : adding a NULL DDict means "return to no-dictionary mode".
+ *  Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
+
+
+/*! ZSTD_DCtx_refPrefix() :
+ *  Reference a prefix (single-usage dictionary) to decompress the next frame.
+ *  Prefix is **only used once**. It must be explicitly referenced before each frame.
+ *  If there is a need to use the same prefix multiple times, consider embedding it into a ZSTD_DDict instead.
+ * @result : 0, or an error code (which can be tested with ZSTD_isError()).
+ *  Note 1 : Adding any prefix (including NULL) invalidates any previously set prefix or dictionary
+ *  Note 2 : Prefix buffer is referenced. It must outlive the decompression job.
+ *  Note 3 : By default, the prefix is treated as raw content (ZSTD_dct_rawContent).
+ *           Use ZSTD_DCtx_refPrefix_advanced() to alter dictContentType.
+ *  Note 4 : Referencing a raw content prefix has almost no CPU or memory cost.
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize);
+ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
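+/* Example (illustrative sketch; `prefixBuf`/`prefixLen` are hypothetical names) :
+ * decompressing frames that were compressed with ZSTD_CCtx_refPrefix() over the
+ * same prefix. The prefix must be re-referenced before each frame :
+ *
+ *     ZSTD_DCtx_refPrefix(dctx, prefixBuf, prefixLen);
+ *     // ... decompress exactly one frame ...
+ *     ZSTD_DCtx_refPrefix(dctx, prefixBuf, prefixLen);  // again for the next frame
+ */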
+
+
+/*! ZSTD_DCtx_setMaxWindowSize() :
+ *  Refuse to allocate internal buffers for frames requiring a window size larger than the provided limit.
+ *  This is useful to prevent a decoder context from reserving too much memory for itself (potential attack scenario).
+ *  This parameter is only useful in streaming mode, since no internal buffer is allocated in direct mode.
+ *  By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_MAX).
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize);
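+/* Example (illustrative) : cap the window at 8 MB, so that a hostile frame
+ * cannot force the decoder to reserve a huge buffer; streaming decompression
+ * of such a frame then fails with an error instead :
+ *
+ *     ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 23);
+ */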
+
+
+/*! ZSTD_DCtx_setFormat() :
+ *  Instruct the decoder context about what kind of data to decode next.
+ *  This instruction is mandatory to decode data without a fully-formed header,
+ *  such as ZSTD_f_zstd1_magicless for example.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()).
+ */
+ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
+
+
+/*! ZSTD_decompress_generic() :
+ *  Behaves the same as ZSTD_decompressStream().
+ *  Decompression parameters cannot be changed once decompression is started.
+ * @return : an error code, which can be tested using ZSTD_isError();
+ *           if >0, a hint : the number of input bytes expected for the next invocation;
+ *           `0` means a frame has just been fully decoded and flushed.
+ */
+ZSTDLIB_API size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx,
+                                           ZSTD_outBuffer* output,
+                                           ZSTD_inBuffer* input);
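+/* Example (illustrative sketch; assumes `src`/`srcSize` hold one complete frame
+ * and `dstBuf`/`dstCapacity` is large enough for the regenerated data) :
+ *
+ *     ZSTD_inBuffer  input  = { src, srcSize, 0 };
+ *     ZSTD_outBuffer output = { dstBuf, dstCapacity, 0 };
+ *     size_t ret;
+ *     do {
+ *         ret = ZSTD_decompress_generic(dctx, &output, &input);
+ *     } while (ret != 0 && !ZSTD_isError(ret));   // 0 => frame fully decoded and flushed
+ */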
+
+
+/*! ZSTD_decompress_generic_simpleArgs() :
+ *  Same as ZSTD_decompress_generic(),
+ *  but using only integral types as arguments.
+ *  Argument list is larger than ZSTD_{in,out}Buffer,
+ *  but can be helpful for binders from dynamic languages
+ *  which have trouble handling structures containing memory pointers.
+ */
+ZSTDLIB_API size_t ZSTD_decompress_generic_simpleArgs (
+                            ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity, size_t* dstPos,
+                      const void* src, size_t srcSize, size_t* srcPos);
+
+
+/*! ZSTD_DCtx_reset() :
+ *  Return a DCtx to a clean state.
+ *  If a decompression was ongoing, any internal data not yet flushed is discarded.
+ *  All parameters are back to default values, including sticky ones.
+ *  Dictionary (if any) is dropped.
+ *  Parameters can be modified again after a reset.
+ */
+ZSTDLIB_API void ZSTD_DCtx_reset(ZSTD_DCtx* dctx);
+
+
+
+/* ============================ */
+/**       Block level API       */
+/* ============================ */
+
+/*!
+    Block functions produce and decode raw zstd blocks, without frame metadata.
+    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+    The user is responsible for carrying the information required to regenerate the data, such as compressed and content sizes.
+
+    A few rules to respect :
+    - Compressing and decompressing require a context structure
+      + Use ZSTD_createCCtx() and ZSTD_createDCtx()
+    - It is necessary to initialize the context before starting
+      + compression : any ZSTD_compressBegin*() variant, including with dictionary
+      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+      + copyCCtx() and copyDCtx() can be used too
+    - Block size is limited : it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+      + If input is larger than a block size, it's necessary to split input data into multiple blocks
+      + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
+        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
+    - When a block is not compressible enough, ZSTD_compressBlock() returns zero,
+      in which case nothing is written into `dst`.
+      + The user must test for this outcome and handle the uncompressed data directly
+      + ZSTD_decompressBlock() does NOT accept uncompressed data as input !
+      + When multiple successive blocks are used and some of them are stored uncompressed,
+        the decoder must be informed of their existence in order to follow proper history.
+        Use ZSTD_insertBlock() for such a case.
+*/
+
+#define ZSTD_BLOCKSIZELOG_MAX 17
+#define ZSTD_BLOCKSIZE_MAX   (1<<ZSTD_BLOCKSIZELOG_MAX)   /* define, for static allocation */
+/*=====   Raw zstd block functions  =====*/
+ZSTDLIB_API size_t ZSTD_getBlockSize   (const ZSTD_CCtx* cctx);
+ZSTDLIB_API size_t ZSTD_compressBlock  (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIB_API size_t ZSTD_insertBlock    (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
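+/* Example (illustrative sketch; both contexts are assumed already initialized
+ * with ZSTD_compressBegin() / ZSTD_decompressBegin(), srcSize is assumed
+ * <= ZSTD_getBlockSize(cctx), and `rBuf` is a hypothetical output buffer;
+ * error checks elided) :
+ *
+ *     size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
+ *     if (cSize == 0) {
+ *         // block not compressible : transmit `src` raw, and keep history coherent
+ *         ZSTD_insertBlock(dctx, src, srcSize);
+ *     } else {
+ *         ZSTD_decompressBlock(dctx, rBuf, rBufCapacity, dst, cSize);
+ *     }
+ */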
+
+
+#endif   /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/vendor/github.com/DataDog/zstd/zstd_common.c b/vendor/github.com/DataDog/zstd/zstd_common.c
new file mode 100644
index 0000000..bccc948
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_common.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdlib.h>      /* malloc, calloc, free */
+#include <string.h>      /* memset */
+#include "error_private.h"
+#include "zstd_internal.h"
+
+
+/*-****************************************
+*  Version
+******************************************/
+unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; }
+
+const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
+
+
+/*-****************************************
+*  ZSTD Error Management
+******************************************/
+/*! ZSTD_isError() :
+ *  tells if a return value is an error code */
+unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTD_getErrorName() :
+ *  provides error code string from function result (useful for debugging) */
+const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+/*! ZSTD_getError() :
+ *  convert a `size_t` function result into a proper ZSTD_errorCode enum */
+ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
+
+/*! ZSTD_getErrorString() :
+ *  provides error code string from enum */
+const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
+
+/*! g_debuglog_enable :
+ *  turn on/off debug traces (global switch) */
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 2)
+int g_debuglog_enable = 1;
+#endif
+
+
+/*=**************************************************************
+*  Custom allocator
+****************************************************************/
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
+{
+    if (customMem.customAlloc)
+        return customMem.customAlloc(customMem.opaque, size);
+    return malloc(size);
+}
+
+void* ZSTD_calloc(size_t size, ZSTD_customMem customMem)
+{
+    if (customMem.customAlloc) {
+        /* calloc implemented as malloc+memset;
+         * not as efficient as calloc, but the next best guess for a custom malloc */
+        void* const ptr = customMem.customAlloc(customMem.opaque, size);
+        if (ptr != NULL) memset(ptr, 0, size);   /* guard against allocation failure */
+        return ptr;
+    }
+    return calloc(1, size);
+}
+
+void ZSTD_free(void* ptr, ZSTD_customMem customMem)
+{
+    if (ptr!=NULL) {
+        if (customMem.customFree)
+            customMem.customFree(customMem.opaque, ptr);
+        else
+            free(ptr);
+    }
+}
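+
+/* Example (illustrative sketch) : routing all zstd allocations through a
+ * caller-supplied allocator. `myAlloc` / `myFree` are hypothetical functions
+ * matching the ZSTD_allocFunction / ZSTD_freeFunction signatures :
+ *
+ *     static void* myAlloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
+ *     static void  myFree (void* opaque, void* addr)  { (void)opaque; free(addr); }
+ *
+ *     ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
+ */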
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress.c b/vendor/github.com/DataDog/zstd/zstd_compress.c
new file mode 100644
index 0000000..2aa26da
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_compress.c
@@ -0,0 +1,3449 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*-*************************************
+*  Tuning parameters
+***************************************/
+#ifndef ZSTD_CLEVEL_DEFAULT
+#  define ZSTD_CLEVEL_DEFAULT 3
+#endif
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <string.h>         /* memset */
+#include "cpu.h"
+#include "mem.h"
+#define FSE_STATIC_LINKING_ONLY   /* FSE_encodeSymbol */
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_compress_internal.h"
+#include "zstd_fast.h"
+#include "zstd_double_fast.h"
+#include "zstd_lazy.h"
+#include "zstd_opt.h"
+#include "zstd_ldm.h"
+
+
+/*-*************************************
+*  Helper functions
+***************************************/
+size_t ZSTD_compressBound(size_t srcSize) {
+    return ZSTD_COMPRESSBOUND(srcSize);
+}
+
+
+/*-*************************************
+*  Context memory management
+***************************************/
+struct ZSTD_CDict_s {
+    void* dictBuffer;
+    const void* dictContent;
+    size_t dictContentSize;
+    void* workspace;
+    size_t workspaceSize;
+    ZSTD_matchState_t matchState;
+    ZSTD_compressedBlockState_t cBlockState;
+    ZSTD_compressionParameters cParams;
+    ZSTD_customMem customMem;
+    U32 dictID;
+};  /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+ZSTD_CCtx* ZSTD_createCCtx(void)
+{
+    return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+{
+    ZSTD_STATIC_ASSERT(zcss_init==0);
+    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+    {   ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_calloc(sizeof(ZSTD_CCtx), customMem);
+        if (!cctx) return NULL;
+        cctx->customMem = customMem;
+        cctx->requestedParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+        cctx->requestedParams.fParams.contentSizeFlag = 1;
+        cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+        return cctx;
+    }
+}
+
+ZSTD_CCtx* ZSTD_initStaticCCtx(void *workspace, size_t workspaceSize)
+{
+    ZSTD_CCtx* const cctx = (ZSTD_CCtx*) workspace;
+    if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL;  /* minimum size */
+    if ((size_t)workspace & 7) return NULL;  /* must be 8-aligned */
+    memset(workspace, 0, workspaceSize);   /* may be a bit generous, could memset be smaller ? */
+    cctx->staticSize = workspaceSize;
+    cctx->workSpace = (void*)(cctx+1);
+    cctx->workSpaceSize = workspaceSize - sizeof(ZSTD_CCtx);
+
+    /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+    if (cctx->workSpaceSize < HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t)) return NULL;
+    assert(((size_t)cctx->workSpace & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
+    cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)cctx->workSpace;
+    cctx->blockState.nextCBlock = cctx->blockState.prevCBlock + 1;
+    {
+        void* const ptr = cctx->blockState.nextCBlock + 1;
+        cctx->entropyWorkspace = (U32*)ptr;
+    }
+    cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+    return cctx;
+}
+
+size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return 0;   /* support free on NULL */
+    if (cctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static CCtx */
+    ZSTD_free(cctx->workSpace, cctx->customMem); cctx->workSpace = NULL;
+    ZSTD_freeCDict(cctx->cdictLocal); cctx->cdictLocal = NULL;
+#ifdef ZSTD_MULTITHREAD
+    ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
+#endif
+    ZSTD_free(cctx, cctx->customMem);
+    return 0;   /* reserved as a potential error code in the future */
+}
+
+
+static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+    return ZSTDMT_sizeof_CCtx(cctx->mtctx);
+#else
+    (void) cctx;
+    return 0;
+#endif
+}
+
+
+size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*cctx) + cctx->workSpaceSize
+           + ZSTD_sizeof_CDict(cctx->cdictLocal)
+           + ZSTD_sizeof_mtctx(cctx);
+}
+
+size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+{
+    return ZSTD_sizeof_CCtx(zcs);  /* same object */
+}
+
+/* private API call, for dictBuilder only */
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize)
+{
+    ZSTD_compressionParameters cParams = ZSTD_getCParams(CCtxParams->compressionLevel, srcSizeHint, dictSize);
+    if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
+    if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog;
+    if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog;
+    if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog;
+    if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog;
+    if (CCtxParams->cParams.searchLength) cParams.searchLength = CCtxParams->cParams.searchLength;
+    if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength;
+    if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy;
+    return cParams;
+}
+
+static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
+        ZSTD_compressionParameters cParams)
+{
+    ZSTD_CCtx_params cctxParams;
+    memset(&cctxParams, 0, sizeof(cctxParams));
+    cctxParams.cParams = cParams;
+    cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;  /* should not matter, as all cParams are presumed properly defined */
+    assert(!ZSTD_checkCParams(cParams));
+    cctxParams.fParams.contentSizeFlag = 1;
+    return cctxParams;
+}
+
+static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
+        ZSTD_customMem customMem)
+{
+    ZSTD_CCtx_params* params;
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+    params = (ZSTD_CCtx_params*)ZSTD_calloc(
+            sizeof(ZSTD_CCtx_params), customMem);
+    if (!params) { return NULL; }
+    params->customMem = customMem;
+    params->compressionLevel = ZSTD_CLEVEL_DEFAULT;
+    params->fParams.contentSizeFlag = 1;
+    return params;
+}
+
+ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
+{
+    return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
+{
+    if (params == NULL) { return 0; }
+    ZSTD_free(params, params->customMem);
+    return 0;
+}
+
+size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
+{
+    return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
+}
+
+size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
+    if (!cctxParams) { return ERROR(GENERIC); }
+    memset(cctxParams, 0, sizeof(*cctxParams));
+    cctxParams->compressionLevel = compressionLevel;
+    cctxParams->fParams.contentSizeFlag = 1;
+    return 0;
+}
+
+size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
+{
+    if (!cctxParams) { return ERROR(GENERIC); }
+    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    memset(cctxParams, 0, sizeof(*cctxParams));
+    cctxParams->cParams = params.cParams;
+    cctxParams->fParams = params.fParams;
+    cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
+    assert(!ZSTD_checkCParams(params.cParams));
+    return 0;
+}
+
+/* ZSTD_assignParamsToCCtxParams() :
+ * params is presumed valid at this stage */
+static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
+        ZSTD_CCtx_params cctxParams, ZSTD_parameters params)
+{
+    ZSTD_CCtx_params ret = cctxParams;
+    ret.cParams = params.cParams;
+    ret.fParams = params.fParams;
+    ret.compressionLevel = ZSTD_CLEVEL_DEFAULT;   /* should not matter, as all cParams are presumed properly defined */
+    assert(!ZSTD_checkCParams(params.cParams));
+    return ret;
+}
+
+#define CLAMPCHECK(val,min,max) {            \
+    if (((val)<(min)) | ((val)>(max))) {     \
+        return ERROR(parameter_outOfBound);  \
+}   }
+
+
+static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
+{
+    switch(param)
+    {
+    case ZSTD_p_compressionLevel:
+    case ZSTD_p_hashLog:
+    case ZSTD_p_chainLog:
+    case ZSTD_p_searchLog:
+    case ZSTD_p_minMatch:
+    case ZSTD_p_targetLength:
+    case ZSTD_p_compressionStrategy:
+    case ZSTD_p_compressLiterals:
+        return 1;
+
+    case ZSTD_p_format:
+    case ZSTD_p_windowLog:
+    case ZSTD_p_contentSizeFlag:
+    case ZSTD_p_checksumFlag:
+    case ZSTD_p_dictIDFlag:
+    case ZSTD_p_forceMaxWindow :
+    case ZSTD_p_nbWorkers:
+    case ZSTD_p_jobSize:
+    case ZSTD_p_overlapSizeLog:
+    case ZSTD_p_enableLongDistanceMatching:
+    case ZSTD_p_ldmHashLog:
+    case ZSTD_p_ldmMinMatch:
+    case ZSTD_p_ldmBucketSizeLog:
+    case ZSTD_p_ldmHashEveryLog:
+    default:
+        return 0;
+    }
+}
+
+size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, unsigned value)
+{
+    DEBUGLOG(4, "ZSTD_CCtx_setParameter (%u, %u)", (U32)param, value);
+    if (cctx->streamStage != zcss_init) {
+        if (ZSTD_isUpdateAuthorized(param)) {
+            cctx->cParamsChanged = 1;
+        } else {
+            return ERROR(stage_wrong);
+    }   }
+
+    switch(param)
+    {
+    case ZSTD_p_format :
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_compressionLevel:
+        if (cctx->cdict) return ERROR(stage_wrong);
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_windowLog:
+    case ZSTD_p_hashLog:
+    case ZSTD_p_chainLog:
+    case ZSTD_p_searchLog:
+    case ZSTD_p_minMatch:
+    case ZSTD_p_targetLength:
+    case ZSTD_p_compressionStrategy:
+        if (cctx->cdict) return ERROR(stage_wrong);
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_compressLiterals:
+    case ZSTD_p_contentSizeFlag:
+    case ZSTD_p_checksumFlag:
+    case ZSTD_p_dictIDFlag:
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_forceMaxWindow :  /* Force back-references to remain < windowSize,
+                                   * even when referencing into Dictionary content.
+                                   * default : 0 when using a CDict, 1 when using a Prefix */
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_nbWorkers:
+        if ((value>0) && cctx->staticSize) {
+            return ERROR(parameter_unsupported);  /* MT not compatible with static alloc */
+        }
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_jobSize:
+    case ZSTD_p_overlapSizeLog:
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    case ZSTD_p_enableLongDistanceMatching:
+    case ZSTD_p_ldmHashLog:
+    case ZSTD_p_ldmMinMatch:
+    case ZSTD_p_ldmBucketSizeLog:
+    case ZSTD_p_ldmHashEveryLog:
+        if (cctx->cdict) return ERROR(stage_wrong);
+        return ZSTD_CCtxParam_setParameter(&cctx->requestedParams, param, value);
+
+    default: return ERROR(parameter_unsupported);
+    }
+}
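+
+/* Example (illustrative sketch; `in` / `out` are a ZSTD_inBuffer / ZSTD_outBuffer
+ * covering the whole input and a ZSTD_compressBound()-sized output; error
+ * checks elided) : set parameters one by one, then run a one-shot compression :
+ *
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_compressionLevel, 5);
+ *     ZSTD_CCtx_setParameter(cctx, ZSTD_p_checksumFlag, 1);
+ *     ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end);
+ */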
+
+size_t ZSTD_CCtxParam_setParameter(
+        ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, unsigned value)
+{
+    DEBUGLOG(4, "ZSTD_CCtxParam_setParameter (%u, %u)", (U32)param, value);
+    switch(param)
+    {
+    case ZSTD_p_format :
+        if (value > (unsigned)ZSTD_f_zstd1_magicless)
+            return ERROR(parameter_unsupported);
+        CCtxParams->format = (ZSTD_format_e)value;
+        return (size_t)CCtxParams->format;
+
+    case ZSTD_p_compressionLevel : {
+        int cLevel = (int)value;  /* cast expected to restore negative sign */
+        if (cLevel > ZSTD_maxCLevel()) cLevel = ZSTD_maxCLevel();
+        if (cLevel) {  /* 0 : does not change current level */
+            CCtxParams->disableLiteralCompression = (cLevel<0);  /* negative levels disable huffman */
+            CCtxParams->compressionLevel = cLevel;
+        }
+        if (CCtxParams->compressionLevel >= 0) return CCtxParams->compressionLevel;
+        return 0;  /* return type (size_t) cannot represent negative values */
+    }
+
+    case ZSTD_p_windowLog :
+        if (value>0)   /* 0 => use default */
+            CLAMPCHECK(value, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+        CCtxParams->cParams.windowLog = value;
+        return CCtxParams->cParams.windowLog;
+
+    case ZSTD_p_hashLog :
+        if (value>0)   /* 0 => use default */
+            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+        CCtxParams->cParams.hashLog = value;
+        return CCtxParams->cParams.hashLog;
+
+    case ZSTD_p_chainLog :
+        if (value>0)   /* 0 => use default */
+            CLAMPCHECK(value, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+        CCtxParams->cParams.chainLog = value;
+        return CCtxParams->cParams.chainLog;
+
+    case ZSTD_p_searchLog :
+        if (value>0)   /* 0 => use default */
+            CLAMPCHECK(value, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+        CCtxParams->cParams.searchLog = value;
+        return value;
+
+    case ZSTD_p_minMatch :
+        if (value>0)   /* 0 => use default */
+            CLAMPCHECK(value, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
+        CCtxParams->cParams.searchLength = value;
+        return CCtxParams->cParams.searchLength;
+
+    case ZSTD_p_targetLength :
+        /* all values are valid. 0 => use default */
+        CCtxParams->cParams.targetLength = value;
+        return CCtxParams->cParams.targetLength;
+
+    case ZSTD_p_compressionStrategy :
+        if (value>0)   /* 0 => use default */
+            CLAMPCHECK(value, (unsigned)ZSTD_fast, (unsigned)ZSTD_btultra);
+        CCtxParams->cParams.strategy = (ZSTD_strategy)value;
+        return (size_t)CCtxParams->cParams.strategy;
+
+    case ZSTD_p_compressLiterals:
+        CCtxParams->disableLiteralCompression = !value;
+        return !CCtxParams->disableLiteralCompression;
+
+    case ZSTD_p_contentSizeFlag :
+        /* Content size written in frame header _when known_ (default:1) */
+        DEBUGLOG(4, "set content size flag = %u", (value>0));
+        CCtxParams->fParams.contentSizeFlag = value > 0;
+        return CCtxParams->fParams.contentSizeFlag;
+
+    case ZSTD_p_checksumFlag :
+        /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
+        CCtxParams->fParams.checksumFlag = value > 0;
+        return CCtxParams->fParams.checksumFlag;
+
+    case ZSTD_p_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
+        DEBUGLOG(4, "set dictIDFlag = %u", (value>0));
+        CCtxParams->fParams.noDictIDFlag = !value;
+        return !CCtxParams->fParams.noDictIDFlag;
+
+    case ZSTD_p_forceMaxWindow :
+        CCtxParams->forceWindow = (value > 0);
+        return CCtxParams->forceWindow;
+
+    case ZSTD_p_nbWorkers :
+#ifndef ZSTD_MULTITHREAD
+        if (value>0) return ERROR(parameter_unsupported);
+        return 0;
+#else
+        return ZSTDMT_CCtxParam_setNbWorkers(CCtxParams, value);
+#endif
+
+    case ZSTD_p_jobSize :
+#ifndef ZSTD_MULTITHREAD
+        return ERROR(parameter_unsupported);
+#else
+        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_jobSize, value);
+#endif
+
+    case ZSTD_p_overlapSizeLog :
+#ifndef ZSTD_MULTITHREAD
+        return ERROR(parameter_unsupported);
+#else
+        return ZSTDMT_CCtxParam_setMTCtxParameter(CCtxParams, ZSTDMT_p_overlapSectionLog, value);
+#endif
+
+    case ZSTD_p_enableLongDistanceMatching :
+        CCtxParams->ldmParams.enableLdm = (value>0);
+        return CCtxParams->ldmParams.enableLdm;
+
+    case ZSTD_p_ldmHashLog :
+        if (value>0)   /* 0 ==> auto */
+            CLAMPCHECK(value, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+        CCtxParams->ldmParams.hashLog = value;
+        return CCtxParams->ldmParams.hashLog;
+
+    case ZSTD_p_ldmMinMatch :
+        if (value>0)   /* 0 ==> default */
+            CLAMPCHECK(value, ZSTD_LDM_MINMATCH_MIN, ZSTD_LDM_MINMATCH_MAX);
+        CCtxParams->ldmParams.minMatchLength = value;
+        return CCtxParams->ldmParams.minMatchLength;
+
+    case ZSTD_p_ldmBucketSizeLog :
+        if (value > ZSTD_LDM_BUCKETSIZELOG_MAX)
+            return ERROR(parameter_outOfBound);
+        CCtxParams->ldmParams.bucketSizeLog = value;
+        return CCtxParams->ldmParams.bucketSizeLog;
+
+    case ZSTD_p_ldmHashEveryLog :
+        if (value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN)
+            return ERROR(parameter_outOfBound);
+        CCtxParams->ldmParams.hashEveryLog = value;
+        return CCtxParams->ldmParams.hashEveryLog;
+
+    default: return ERROR(parameter_unsupported);
+    }
+}
+
+/** ZSTD_CCtx_setParametersUsingCCtxParams() :
+ *  simply stores `params` into `cctx` :
+ *  no immediate action is taken, the parameters are merely recorded.
+ *  If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
+ *    This is possible even if a compression is ongoing;
+ *    in that case, the new parameters are applied on the fly, starting with the next compression job.
+ */
+size_t ZSTD_CCtx_setParametersUsingCCtxParams(
+        ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
+{
+    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    if (cctx->cdict) return ERROR(stage_wrong);
+
+    cctx->requestedParams = *params;
+    return 0;
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize);
+    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+    return 0;
+}
+
+size_t ZSTD_CCtx_loadDictionary_advanced(
+        ZSTD_CCtx* cctx, const void* dict, size_t dictSize,
+        ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
+    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    if (cctx->staticSize) return ERROR(memory_allocation);  /* no malloc for static CCtx */
+    DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
+    ZSTD_freeCDict(cctx->cdictLocal);  /* in case one already exists */
+    if (dict==NULL || dictSize==0) {   /* no dictionary mode */
+        cctx->cdictLocal = NULL;
+        cctx->cdict = NULL;
+    } else {
+        ZSTD_compressionParameters const cParams =
+                ZSTD_getCParamsFromCCtxParams(&cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, dictSize);
+        cctx->cdictLocal = ZSTD_createCDict_advanced(
+                                dict, dictSize,
+                                dictLoadMethod, dictContentType,
+                                cParams, cctx->customMem);
+        cctx->cdict = cctx->cdictLocal;
+        if (cctx->cdictLocal == NULL)
+            return ERROR(memory_allocation);
+    }
+    return 0;
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(
+      ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+    return ZSTD_CCtx_loadDictionary_advanced(
+            cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+{
+    return ZSTD_CCtx_loadDictionary_advanced(
+            cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
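+
+/* Example (illustrative sketch; `dictBuf`/`dictLen` are hypothetical names,
+ * error checks elided) : load a dictionary once, then compress several frames with it :
+ *
+ *     ZSTD_CCtx_loadDictionary(cctx, dictBuf, dictLen);
+ *     // one ZSTD_compress_generic(cctx, &out, &in, ZSTD_e_end) session per frame;
+ *     // the dictionary stays active until replaced or reset
+ */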
+
+
+size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    cctx->cdict = cdict;
+    memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* exclusive */
+    return 0;
+}
+
+size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
+{
+    return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+size_t ZSTD_CCtx_refPrefix_advanced(
+        ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+    if (cctx->streamStage != zcss_init) return ERROR(stage_wrong);
+    cctx->cdict = NULL;   /* prefix discards any prior cdict */
+    cctx->prefixDict.dict = prefix;
+    cctx->prefixDict.dictSize = prefixSize;
+    cctx->prefixDict.dictContentType = dictContentType;
+    return 0;
+}
+
+static void ZSTD_startNewCompression(ZSTD_CCtx* cctx)
+{
+    cctx->streamStage = zcss_init;
+    cctx->pledgedSrcSizePlusOne = 0;
+}
+
+/*! ZSTD_CCtx_reset() :
+ *  Also dumps dictionary */
+void ZSTD_CCtx_reset(ZSTD_CCtx* cctx)
+{
+    ZSTD_startNewCompression(cctx);
+    cctx->cdict = NULL;
+}
+
+/** ZSTD_checkCParams() :
+    check that CParam values remain within the authorized range.
+    @return : 0, or an error code if one value is beyond authorized range */
+size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+{
+    CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+    CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+    CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+    CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+    CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
+    if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN)
+        return ERROR(parameter_unsupported);
+    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra)
+        return ERROR(parameter_unsupported);
+    return 0;
+}
+
+/** ZSTD_clampCParams() :
+ *  clamp CParam values into the valid range.
+ *  @return : valid CParams */
+static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams)
+{
+#   define CLAMP(val,min,max) {      \
+        if (val<min) val=min;        \
+        else if (val>max) val=max;   \
+    }
+    CLAMP(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+    CLAMP(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+    CLAMP(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+    CLAMP(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+    CLAMP(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
+    if ((U32)(cParams.targetLength) < ZSTD_TARGETLENGTH_MIN) cParams.targetLength = ZSTD_TARGETLENGTH_MIN;
+    if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) cParams.strategy = ZSTD_btultra;
+    return cParams;
+}
+
+/** ZSTD_cycleLog() :
+ *  condition for correct operation : hashLog > 1 */
+static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+{
+    U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+    return hashLog - btScale;
+}
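+/* Note (illustrative) : binary-tree strategies (>= ZSTD_btlazy2) store two
+ * chain-table entries per position, which halves the number of positions the
+ * table can track; e.g. with chainLog==16, cycleLog == 15 for ZSTD_btlazy2,
+ * but stays 16 for ZSTD_lazy2. */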
+
+/** ZSTD_adjustCParams_internal() :
+    optimize `cPar` for a given input (`srcSize` and `dictSize`).
+    mostly downsizing to reduce memory consumption and initialization latency.
+    Both `srcSize` and `dictSize` are optional (use 0 if unknown).
+    Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that condition. */
+ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+{
+    static const U64 minSrcSize = 513; /* (1<<9) + 1 */
+    static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
+    assert(ZSTD_checkCParams(cPar)==0);
+
+    if (dictSize && (srcSize+1<2) /* srcSize unknown */ )
+        srcSize = minSrcSize;  /* presumed small when there is a dictionary */
+    else if (srcSize == 0)
+        srcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* 0 == unknown : presumed large */
+
+    /* resize windowLog if input is small enough, to use less memory */
+    if ( (srcSize < maxWindowResize)
+      && (dictSize < maxWindowResize) )  {
+        U32 const tSize = (U32)(srcSize + dictSize);
+        static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
+        U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
+                            ZSTD_highbit32(tSize-1) + 1;
+        if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+    }
+    if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
+    {   U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+        if (cycleLog > cPar.windowLog)
+            cPar.chainLog -= (cycleLog - cPar.windowLog);
+    }
+
+    if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
+        cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN;  /* required for frame header */
+
+    return cPar;
+}
+
+ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+{
+    cPar = ZSTD_clampCParams(cPar);
+    return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize);
+}
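+
+/* Example (illustrative; exact figures depend on the level table) : for a 4 KB
+ * input, a large default windowLog is downsized towards log2(4096) == 12,
+ * saving most of the table memory :
+ *
+ *     ZSTD_compressionParameters cp = ZSTD_getCParams(19, 0, 0);
+ *     cp = ZSTD_adjustCParams(cp, 4096, 0);   // cp.windowLog now ~12
+ */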
+
+static size_t ZSTD_sizeof_matchState(ZSTD_compressionParameters const* cParams, const U32 forCCtx)
+{
+    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+    size_t const hSize = ((size_t)1) << cParams->hashLog;
+    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+    size_t const h3Size = ((size_t)1) << hashLog3;
+    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+    size_t const optPotentialSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits)) * sizeof(U32)
+                          + (ZSTD_OPT_NUM+1) * (sizeof(ZSTD_match_t)+sizeof(ZSTD_optimal_t));
+    size_t const optSpace = (forCCtx && ((cParams->strategy == ZSTD_btopt) ||
+                                         (cParams->strategy == ZSTD_btultra)))
+                                ? optPotentialSpace
+                                : 0;
+    DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
+                (U32)chainSize, (U32)hSize, (U32)h3Size);
+    return tableSpace + optSpace;
+}
+
+size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+    /* Estimate CCtx size is supported for single-threaded compression only. */
+    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
+    {   ZSTD_compressionParameters const cParams =
+                ZSTD_getCParamsFromCCtxParams(params, 0, 0);
+        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog);
+        U32    const divider = (cParams.searchLength==3) ? 3 : 4;
+        size_t const maxNbSeq = blockSize / divider;
+        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const entropySpace = HUF_WORKSPACE_SIZE;
+        size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
+        size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1);
+
+        size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams);
+        size_t const ldmSeqSpace = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq);
+
+        size_t const neededSpace = entropySpace + blockStateSpace + tokenSpace +
+                                   matchStateSize + ldmSpace + ldmSeqSpace;
+
+        DEBUGLOG(5, "sizeof(ZSTD_CCtx) : %u", (U32)sizeof(ZSTD_CCtx));
+        DEBUGLOG(5, "estimate workSpace : %u", (U32)neededSpace);
+        return sizeof(ZSTD_CCtx) + neededSpace;
+    }
+}
+
+size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+    return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
+}
+
+static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
+{
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
+    return ZSTD_estimateCCtxSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCCtxSize(int compressionLevel)
+{
+    int level;
+    size_t memBudget = 0;
+    for (level=1; level<=compressionLevel; level++) {
+        size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
+        if (newMB > memBudget) memBudget = newMB;
+    }
+    return memBudget;
+}
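+
+/* Example (illustrative sketch; error checks elided) : size a static CCtx for
+ * a given compression level, then place it in caller-owned memory :
+ *
+ *     size_t const ctxSize = ZSTD_estimateCCtxSize(level);
+ *     void* const mem = malloc(ctxSize);
+ *     ZSTD_CCtx* const staticCCtx = ZSTD_initStaticCCtx(mem, ctxSize);
+ */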
+
+size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
+{
+    if (params->nbWorkers > 0) { return ERROR(GENERIC); }
+    {   size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params);
+        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << params->cParams.windowLog);
+        size_t const inBuffSize = ((size_t)1 << params->cParams.windowLog) + blockSize;
+        size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
+        size_t const streamingSize = inBuffSize + outBuffSize;
+
+        return CCtxSize + streamingSize;
+    }
+}
+
+size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
+{
+    ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
+    return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
+}
+
+static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) {
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, 0);
+    return ZSTD_estimateCStreamSize_usingCParams(cParams);
+}
+
+size_t ZSTD_estimateCStreamSize(int compressionLevel) {
+    int level;
+    size_t memBudget = 0;
+    for (level=1; level<=compressionLevel; level++) {
+        size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
+        if (newMB > memBudget) memBudget = newMB;
+    }
+    return memBudget;
+}
+
+/* ZSTD_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for the current frame.
+ * It can track progression inside worker threads (non-blocking mode).
+ */
+ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
+{
+#ifdef ZSTD_MULTITHREAD
+    if (cctx->appliedParams.nbWorkers > 0) {
+        return ZSTDMT_getFrameProgression(cctx->mtctx);
+    }
+#endif
+    {   ZSTD_frameProgression fp;
+        size_t const buffered = (cctx->inBuff == NULL) ? 0 :
+                                cctx->inBuffPos - cctx->inToCompress;
+        if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
+        assert(buffered <= ZSTD_BLOCKSIZE_MAX);
+        fp.ingested = cctx->consumedSrcSize + buffered;
+        fp.consumed = cctx->consumedSrcSize;
+        fp.produced = cctx->producedCSize;
+        return fp;
+}   }
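+
+/* Example (illustrative sketch) : with nbWorkers >= 1, compression calls are
+ * non-blocking, so progression can be polled from the feeding thread, e.g. to
+ * drive a progress indicator :
+ *
+ *     ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
+ *     // fp.ingested >= fp.consumed ; fp.produced grows as jobs complete
+ */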
+
+
+static U32 ZSTD_equivalentCParams(ZSTD_compressionParameters cParams1,
+                                  ZSTD_compressionParameters cParams2)
+{
+    return (cParams1.hashLog  == cParams2.hashLog)
+         & (cParams1.chainLog == cParams2.chainLog)
+         & (cParams1.strategy == cParams2.strategy)   /* opt parser space */
+         & ((cParams1.searchLength==3) == (cParams2.searchLength==3));  /* hashlog3 space */
+}
+
+/** The parameters are equivalent if ldm is not enabled in both sets or
+ *  all the parameters are equivalent. */
+static U32 ZSTD_equivalentLdmParams(ldmParams_t ldmParams1,
+                                    ldmParams_t ldmParams2)
+{
+    return (!ldmParams1.enableLdm && !ldmParams2.enableLdm) ||
+           (ldmParams1.enableLdm == ldmParams2.enableLdm &&
+            ldmParams1.hashLog == ldmParams2.hashLog &&
+            ldmParams1.bucketSizeLog == ldmParams2.bucketSizeLog &&
+            ldmParams1.minMatchLength == ldmParams2.minMatchLength &&
+            ldmParams1.hashEveryLog == ldmParams2.hashEveryLog);
+}
+
+typedef enum { ZSTDb_not_buffered, ZSTDb_buffered } ZSTD_buffered_policy_e;
+
+/* ZSTD_sufficientBuff() :
+ * checks that internal buffers are large enough for streaming when buffPol == ZSTDb_buffered.
+ * Note : they are assumed to be correctly sized if ZSTD_equivalentCParams()==1 */
+static U32 ZSTD_sufficientBuff(size_t bufferSize1, size_t blockSize1,
+                            ZSTD_buffered_policy_e buffPol2,
+                            ZSTD_compressionParameters cParams2,
+                            U64 pledgedSrcSize)
+{
+    size_t const windowSize2 = MAX(1, (size_t)MIN(((U64)1 << cParams2.windowLog), pledgedSrcSize));
+    size_t const blockSize2 = MIN(ZSTD_BLOCKSIZE_MAX, windowSize2);
+    size_t const neededBufferSize2 = (buffPol2==ZSTDb_buffered) ? windowSize2 + blockSize2 : 0;
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is windowSize2=%u <= wlog1=%u",
+                (U32)windowSize2, cParams2.windowLog);
+    DEBUGLOG(4, "ZSTD_sufficientBuff: is blockSize2=%u <= blockSize1=%u",
+                (U32)blockSize2, (U32)blockSize1);
+    return (blockSize2 <= blockSize1) /* seqStore space depends on blockSize */
+         & (neededBufferSize2 <= bufferSize1);
+}
+
+/** Equivalence for resetCCtx purposes */
+static U32 ZSTD_equivalentParams(ZSTD_CCtx_params params1,
+                                 ZSTD_CCtx_params params2,
+                                 size_t buffSize1, size_t blockSize1,
+                                 ZSTD_buffered_policy_e buffPol2,
+                                 U64 pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_equivalentParams: pledgedSrcSize=%u", (U32)pledgedSrcSize);
+    return ZSTD_equivalentCParams(params1.cParams, params2.cParams) &&
+           ZSTD_equivalentLdmParams(params1.ldmParams, params2.ldmParams) &&
+           ZSTD_sufficientBuff(buffSize1, blockSize1, buffPol2, params2.cParams, pledgedSrcSize);
+}
+
+static void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
+{
+    int i;
+    for (i = 0; i < ZSTD_REP_NUM; ++i)
+        bs->rep[i] = repStartValue[i];
+    bs->entropy.hufCTable_repeatMode = HUF_repeat_none;
+    bs->entropy.offcode_repeatMode = FSE_repeat_none;
+    bs->entropy.matchlength_repeatMode = FSE_repeat_none;
+    bs->entropy.litlength_repeatMode = FSE_repeat_none;
+}
+
+/*! ZSTD_invalidateMatchState()
+ * Invalidate all the matches in the match finder tables.
+ * Requires nextSrc and base to be set (can be NULL).
+ */
+static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
+{
+    ZSTD_window_clear(&ms->window);
+
+    ms->nextToUpdate = ms->window.dictLimit + 1;
+    ms->loadedDictEnd = 0;
+    ms->opt.litLengthSum = 0;  /* force reset of btopt stats */
+}
+
+/*! ZSTD_continueCCtx() :
+ *  reuse CCtx without reset (note : requires no dictionary) */
+static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_CCtx_params params, U64 pledgedSrcSize)
+{
+    size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
+    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+    DEBUGLOG(4, "ZSTD_continueCCtx: re-use context in place");
+
+    cctx->blockSize = blockSize;   /* previous block size could be different even for same windowLog, due to pledgedSrcSize */
+    cctx->appliedParams = params;
+    cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+    cctx->consumedSrcSize = 0;
+    cctx->producedCSize = 0;
+    if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+        cctx->appliedParams.fParams.contentSizeFlag = 0;
+    DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+        (U32)pledgedSrcSize, cctx->appliedParams.fParams.contentSizeFlag);
+    cctx->stage = ZSTDcs_init;
+    cctx->dictID = 0;
+    if (params.ldmParams.enableLdm)
+        ZSTD_window_clear(&cctx->ldmState.window);
+    ZSTD_referenceExternalSequences(cctx, NULL, 0);
+    ZSTD_invalidateMatchState(&cctx->blockState.matchState);
+    ZSTD_reset_compressedBlockState(cctx->blockState.prevCBlock);
+    XXH64_reset(&cctx->xxhState, 0);
+    return 0;
+}
+
+typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset } ZSTD_compResetPolicy_e;
+
+static void* ZSTD_reset_matchState(ZSTD_matchState_t* ms, void* ptr, ZSTD_compressionParameters const* cParams, ZSTD_compResetPolicy_e const crp, U32 const forCCtx)
+{
+    size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
+    size_t const hSize = ((size_t)1) << cParams->hashLog;
+    U32    const hashLog3 = (forCCtx && cParams->searchLength==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
+    size_t const h3Size = ((size_t)1) << hashLog3;
+    size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+
+    assert(((size_t)ptr & 3) == 0);
+
+    ms->hashLog3 = hashLog3;
+    memset(&ms->window, 0, sizeof(ms->window));
+    ZSTD_invalidateMatchState(ms);
+
+    /* opt parser space */
+    if (forCCtx && ((cParams->strategy == ZSTD_btopt) | (cParams->strategy == ZSTD_btultra))) {
+        DEBUGLOG(4, "reserving optimal parser space");
+        ms->opt.litFreq = (U32*)ptr;
+        ms->opt.litLengthFreq = ms->opt.litFreq + (1<<Litbits);
+        ms->opt.matchLengthFreq = ms->opt.litLengthFreq + (MaxLL+1);
+        ms->opt.offCodeFreq = ms->opt.matchLengthFreq + (MaxML+1);
+        ptr = ms->opt.offCodeFreq + (MaxOff+1);
+        ms->opt.matchTable = (ZSTD_match_t*)ptr;
+        ptr = ms->opt.matchTable + ZSTD_OPT_NUM+1;
+        ms->opt.priceTable = (ZSTD_optimal_t*)ptr;
+        ptr = ms->opt.priceTable + ZSTD_OPT_NUM+1;
+    }
+
+    /* table Space */
+    DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_noMemset);
+    assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
+    if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
+    ms->hashTable = (U32*)(ptr);
+    ms->chainTable = ms->hashTable + hSize;
+    ms->hashTable3 = ms->chainTable + chainSize;
+    ptr = ms->hashTable3 + h3Size;
+
+    assert(((size_t)ptr & 3) == 0);
+    return ptr;
+}
+
+/*! ZSTD_resetCCtx_internal() :
+    note : `params` are assumed fully validated at this stage */
+static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
+                                      ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                                      ZSTD_compResetPolicy_e const crp,
+                                      ZSTD_buffered_policy_e const zbuff)
+{
+    DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
+                (U32)pledgedSrcSize, params.cParams.windowLog);
+    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+
+    if (crp == ZSTDcrp_continue) {
+        if (ZSTD_equivalentParams(zc->appliedParams, params,
+                                zc->inBuffSize, zc->blockSize,
+                                zbuff, pledgedSrcSize)) {
+            DEBUGLOG(4, "ZSTD_equivalentParams()==1 -> continue mode (wLog1=%u, blockSize1=%u)",
+                        zc->appliedParams.cParams.windowLog, (U32)zc->blockSize);
+            return ZSTD_continueCCtx(zc, params, pledgedSrcSize);
+    }   }
+    DEBUGLOG(4, "ZSTD_equivalentParams()==0 -> reset CCtx");
+
+    if (params.ldmParams.enableLdm) {
+        /* Adjust long distance matching parameters */
+        params.ldmParams.windowLog = params.cParams.windowLog;
+        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+        assert(params.ldmParams.hashEveryLog < 32);
+        zc->ldmState.hashPower =
+                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+    }
+
+    {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
+        size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
+        U32    const divider = (params.cParams.searchLength==3) ? 3 : 4;
+        size_t const maxNbSeq = blockSize / divider;
+        size_t const tokenSpace = blockSize + 11*maxNbSeq;
+        size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0;
+        size_t const buffInSize = (zbuff==ZSTDb_buffered) ? windowSize + blockSize : 0;
+        size_t const matchStateSize = ZSTD_sizeof_matchState(&params.cParams, /* forCCtx */ 1);
+        size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize);
+        void* ptr;
+
+        /* Check if workSpace is large enough, alloc a new one if needed */
+        {   size_t const entropySpace = HUF_WORKSPACE_SIZE;
+            size_t const blockStateSpace = 2 * sizeof(ZSTD_compressedBlockState_t);
+            size_t const bufferSpace = buffInSize + buffOutSize;
+            size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams);
+            size_t const ldmSeqSpace = maxNbLdmSeq * sizeof(rawSeq);
+
+            size_t const neededSpace = entropySpace + blockStateSpace + ldmSpace +
+                                       ldmSeqSpace + matchStateSize + tokenSpace +
+                                       bufferSpace;
+            DEBUGLOG(4, "Need %uKB workspace, including %uKB for match state, and %uKB for buffers",
+                        (U32)(neededSpace>>10), (U32)(matchStateSize>>10), (U32)(bufferSpace>>10));
+            DEBUGLOG(4, "windowSize: %u - blockSize: %u", (U32)windowSize, (U32)blockSize);
+
+            if (zc->workSpaceSize < neededSpace) {  /* too small : resize */
+                DEBUGLOG(4, "Need to update workSpaceSize from %uK to %uK",
+                            (unsigned)(zc->workSpaceSize>>10),
+                            (unsigned)(neededSpace>>10));
+                /* static cctx : no resize, error out */
+                if (zc->staticSize) return ERROR(memory_allocation);
+
+                zc->workSpaceSize = 0;
+                ZSTD_free(zc->workSpace, zc->customMem);
+                zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
+                if (zc->workSpace == NULL) return ERROR(memory_allocation);
+                zc->workSpaceSize = neededSpace;
+                ptr = zc->workSpace;
+
+                /* Statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
+                assert(((size_t)zc->workSpace & 3) == 0);   /* ensure correct alignment */
+                assert(zc->workSpaceSize >= 2 * sizeof(ZSTD_compressedBlockState_t));
+                zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)zc->workSpace;
+                zc->blockState.nextCBlock = zc->blockState.prevCBlock + 1;
+                ptr = zc->blockState.nextCBlock + 1;
+                zc->entropyWorkspace = (U32*)ptr;
+        }   }
+
+        /* init params */
+        zc->appliedParams = params;
+        zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
+        zc->consumedSrcSize = 0;
+        zc->producedCSize = 0;
+        if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
+            zc->appliedParams.fParams.contentSizeFlag = 0;
+        DEBUGLOG(4, "pledged content size : %u ; flag : %u",
+            (U32)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
+        zc->blockSize = blockSize;
+
+        XXH64_reset(&zc->xxhState, 0);
+        zc->stage = ZSTDcs_init;
+        zc->dictID = 0;
+
+        ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
+
+        ptr = zc->entropyWorkspace + HUF_WORKSPACE_SIZE_U32;
+
+        /* ldm hash table */
+        /* initialize bucketOffsets table later for pointer alignment */
+        if (params.ldmParams.enableLdm) {
+            size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog;
+            memset(ptr, 0, ldmHSize * sizeof(ldmEntry_t));
+            assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
+            zc->ldmState.hashTable = (ldmEntry_t*)ptr;
+            ptr = zc->ldmState.hashTable + ldmHSize;
+            zc->ldmSequences = (rawSeq*)ptr;
+            ptr = zc->ldmSequences + maxNbLdmSeq;
+            zc->maxNbLdmSequences = maxNbLdmSeq;
+
+            memset(&zc->ldmState.window, 0, sizeof(zc->ldmState.window));
+        }
+        assert(((size_t)ptr & 3) == 0); /* ensure ptr is properly aligned */
+
+        ptr = ZSTD_reset_matchState(&zc->blockState.matchState, ptr, &params.cParams, crp, /* forCCtx */ 1);
+
+        /* sequences storage */
+        zc->seqStore.sequencesStart = (seqDef*)ptr;
+        ptr = zc->seqStore.sequencesStart + maxNbSeq;
+        zc->seqStore.llCode = (BYTE*) ptr;
+        zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
+        zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
+        zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
+        ptr = zc->seqStore.litStart + blockSize;
+
+        /* ldm bucketOffsets table */
+        if (params.ldmParams.enableLdm) {
+            size_t const ldmBucketSize =
+                  ((size_t)1) << (params.ldmParams.hashLog -
+                                  params.ldmParams.bucketSizeLog);
+            memset(ptr, 0, ldmBucketSize);
+            zc->ldmState.bucketOffsets = (BYTE*)ptr;
+            ptr = zc->ldmState.bucketOffsets + ldmBucketSize;
+            ZSTD_window_clear(&zc->ldmState.window);
+        }
+        ZSTD_referenceExternalSequences(zc, NULL, 0);
+
+        /* buffers */
+        zc->inBuffSize = buffInSize;
+        zc->inBuff = (char*)ptr;
+        zc->outBuffSize = buffOutSize;
+        zc->outBuff = zc->inBuff + buffInSize;
+
+        return 0;
+    }
+}
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
+    int i;
+    for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
+    assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+}
+
+static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
+                            const ZSTD_CDict* cdict,
+                            unsigned windowLog,
+                            ZSTD_frameParameters fParams,
+                            U64 pledgedSrcSize,
+                            ZSTD_buffered_policy_e zbuff)
+{
+    {   ZSTD_CCtx_params params = cctx->requestedParams;
+        /* Copy only compression parameters related to tables. */
+        params.cParams = cdict->cParams;
+        if (windowLog) params.cParams.windowLog = windowLog;
+        params.fParams = fParams;
+        ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+                                ZSTDcrp_noMemset, zbuff);
+        assert(cctx->appliedParams.cParams.strategy == cdict->cParams.strategy);
+        assert(cctx->appliedParams.cParams.hashLog == cdict->cParams.hashLog);
+        assert(cctx->appliedParams.cParams.chainLog == cdict->cParams.chainLog);
+    }
+
+    /* copy tables */
+    {   size_t const chainSize = (cdict->cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict->cParams.chainLog);
+        size_t const hSize =  (size_t)1 << cdict->cParams.hashLog;
+        size_t const tableSpace = (chainSize + hSize) * sizeof(U32);
+        assert((U32*)cctx->blockState.matchState.chainTable == (U32*)cctx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
+        assert((U32*)cctx->blockState.matchState.hashTable3 == (U32*)cctx->blockState.matchState.chainTable + chainSize);
+        assert((U32*)cdict->matchState.chainTable == (U32*)cdict->matchState.hashTable + hSize);  /* chainTable must follow hashTable */
+        assert((U32*)cdict->matchState.hashTable3 == (U32*)cdict->matchState.chainTable + chainSize);
+        memcpy(cctx->blockState.matchState.hashTable, cdict->matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
+    }
+    /* Zero the hashTable3, since the cdict never fills it */
+    {   size_t const h3Size = (size_t)1 << cctx->blockState.matchState.hashLog3;
+        assert(cdict->matchState.hashLog3 == 0);
+        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
+    }
+
+    /* copy dictionary offsets */
+    {
+        ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
+        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
+        dstMatchState->window       = srcMatchState->window;
+        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
+        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+    }
+    cctx->dictID = cdict->dictID;
+
+    /* copy block state */
+    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
+
+    return 0;
+}
+
+/*! ZSTD_copyCCtx_internal() :
+ *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ *  The "context", in this case, refers to the hash and chain tables,
+ *  entropy tables, and dictionary references.
+ * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
+ * @return : 0, or an error code */
+static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
+                            const ZSTD_CCtx* srcCCtx,
+                            ZSTD_frameParameters fParams,
+                            U64 pledgedSrcSize,
+                            ZSTD_buffered_policy_e zbuff)
+{
+    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
+    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
+
+    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
+        /* Copy only compression parameters related to tables. */
+        params.cParams = srcCCtx->appliedParams.cParams;
+        params.fParams = fParams;
+        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
+                                ZSTDcrp_noMemset, zbuff);
+        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
+        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
+        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
+        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
+        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
+    }
+
+    /* copy tables */
+    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
+        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
+        size_t const h3Size = (size_t)1 << srcCCtx->blockState.matchState.hashLog3;
+        size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+        assert((U32*)dstCCtx->blockState.matchState.chainTable == (U32*)dstCCtx->blockState.matchState.hashTable + hSize);  /* chainTable must follow hashTable */
+        assert((U32*)dstCCtx->blockState.matchState.hashTable3 == (U32*)dstCCtx->blockState.matchState.chainTable + chainSize);
+        memcpy(dstCCtx->blockState.matchState.hashTable, srcCCtx->blockState.matchState.hashTable, tableSpace);   /* presumes all tables follow each other */
+    }
+
+    /* copy dictionary offsets */
+    {
+        ZSTD_matchState_t const* srcMatchState = &srcCCtx->blockState.matchState;
+        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
+        dstMatchState->window       = srcMatchState->window;
+        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
+        dstMatchState->nextToUpdate3= srcMatchState->nextToUpdate3;
+        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
+    }
+    dstCCtx->dictID = srcCCtx->dictID;
+
+    /* copy block state */
+    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
+
+    return 0;
+}
+
+/*! ZSTD_copyCCtx() :
+ *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ *  pledgedSrcSize==0 means "unknown".
+ *  @return : 0, or an error code */
+size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+{
+    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
+    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
+    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
+
+    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
+                                fParams, pledgedSrcSize,
+                                zbuff);
+}
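+
+/* Illustrative usage sketch for ZSTD_copyCCtx() (not part of the library;
+ * a minimal example assuming the public zstd.h API) :
+ *
+ *     ZSTD_CCtx* const srcCCtx = ZSTD_createCCtx();
+ *     ZSTD_CCtx* const dstCCtx = ZSTD_createCCtx();
+ *     size_t err = ZSTD_compressBegin(srcCCtx, 3);   // compression level 3
+ *     if (!ZSTD_isError(err))
+ *         err = ZSTD_copyCCtx(dstCCtx, srcCCtx, 0);  // 0 == unknown srcSize
+ *     // dstCCtx now carries srcCCtx's tables and can compress independently
+ *     ZSTD_freeCCtx(srcCCtx); ZSTD_freeCCtx(dstCCtx);
+ */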
+
+
+#define ZSTD_ROWSIZE 16
+/*! ZSTD_reduceTable() :
+ *  reduce table indexes by `reducerValue`, or squash to zero.
+ *  `preserveMark` preserves the "unsorted mark" for the btlazy2 strategy.
+ *  It must be set to a constant 0/1 value, to remove the branch during inlining.
+ *  Presumes the table size is a multiple of ZSTD_ROWSIZE,
+ *  to help auto-vectorization */
+FORCE_INLINE_TEMPLATE void
+ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
+{
+    int const nbRows = (int)size / ZSTD_ROWSIZE;
+    int cellNb = 0;
+    int rowNb;
+    assert((size & (ZSTD_ROWSIZE-1)) == 0);  /* multiple of ZSTD_ROWSIZE */
+    assert(size < (1U<<31));   /* can be cast to int */
+    for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
+        int column;
+        for (column=0; column<ZSTD_ROWSIZE; column++) {
+            if (preserveMark) {
+                U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
+                table[cellNb] += adder;
+            }
+            if (table[cellNb] < reducerValue) table[cellNb] = 0;
+            else table[cellNb] -= reducerValue;
+            cellNb++;
+    }   }
+}
+
+static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
+{
+    ZSTD_reduceTable_internal(table, size, reducerValue, 0);
+}
+
+static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
+{
+    ZSTD_reduceTable_internal(table, size, reducerValue, 1);
+}
+
+/*! ZSTD_reduceIndex() :
+*   rescale all indexes to avoid future overflow (indexes are U32) */
+static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+{
+    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+    {   U32 const hSize = (U32)1 << zc->appliedParams.cParams.hashLog;
+        ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
+    }
+
+    if (zc->appliedParams.cParams.strategy != ZSTD_fast) {
+        U32 const chainSize = (U32)1 << zc->appliedParams.cParams.chainLog;
+        if (zc->appliedParams.cParams.strategy == ZSTD_btlazy2)
+            ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
+        else
+            ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
+    }
+
+    if (ms->hashLog3) {
+        U32 const h3Size = (U32)1 << ms->hashLog3;
+        ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
+    }
+}
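+
+/* Worked example (illustrative) : with reducerValue == 0x10000, a table
+ * entry of 0x12345 becomes 0x02345, while any entry below 0x10000 is
+ * squashed to 0.  In the btlazy2 variant, an entry equal to
+ * ZSTD_DUBT_UNSORTED_MARK first has reducerValue added, so the
+ * subsequent subtraction leaves the mark unchanged. */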
+
+
+/*-*******************************************************
+*  Block entropic compression
+*********************************************************/
+
+/* See doc/zstd_compression_format.md for detailed format description */
+
+size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+    MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
+    return ZSTD_blockHeaderSize+srcSize;
+}
+
+
+static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE* const)dst;
+    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+            break;
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+            break;
+        default:   /* not possible : flSize is {1,2,3} */
+            assert(0);
+    }
+
+    memcpy(ostart + flSize, src, srcSize);
+    return srcSize + flSize;
+}
+
+static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE* const)dst;
+    U32   const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+    switch(flSize)
+    {
+        case 1: /* 2 - 1 - 5 */
+            ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+            break;
+        case 2: /* 2 - 2 - 12 */
+            MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+            break;
+        case 3: /* 2 - 2 - 20 */
+            MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+            break;
+        default:   /* not possible : flSize is {1,2,3} */
+            assert(0);
+    }
+
+    ostart[flSize] = *(const BYTE*)src;
+    return flSize+1;
+}
+
+
+static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
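+
+/* Worked example (illustrative) : for a 128 KB block,
+ * ZSTD_minGain(131072) == (131072 >> 6) + 2 == 2050 bytes,
+ * so compression must save at least ~1.6% for the result to be kept. */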
+
+static size_t ZSTD_compressLiterals (ZSTD_entropyCTables_t const* prevEntropy,
+                                     ZSTD_entropyCTables_t* nextEntropy,
+                                     ZSTD_strategy strategy, int disableLiteralCompression,
+                                     void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize,
+                                     U32* workspace, const int bmi2)
+{
+    size_t const minGain = ZSTD_minGain(srcSize);
+    size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+    BYTE*  const ostart = (BYTE*)dst;
+    U32 singleStream = srcSize < 256;
+    symbolEncodingType_e hType = set_compressed;
+    size_t cLitSize;
+
+    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
+                disableLiteralCompression);
+
+    /* Prepare nextEntropy, assuming reuse of the existing table */
+    nextEntropy->hufCTable_repeatMode = prevEntropy->hufCTable_repeatMode;
+    memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable,
+           sizeof(prevEntropy->hufCTable));
+
+    if (disableLiteralCompression)
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+
+    /* small ? don't even attempt compression (speed opt) */
+#   define COMPRESS_LITERALS_SIZE_MIN 63
+    {   size_t const minLitSize = (prevEntropy->hufCTable_repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
+        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }
+
+    if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
+    {   HUF_repeat repeat = prevEntropy->hufCTable_repeatMode;
+        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
+        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
+                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2)
+                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
+                                      workspace, HUF_WORKSPACE_SIZE, (HUF_CElt*)nextEntropy->hufCTable, &repeat, preferRepeat, bmi2);
+        if (repeat != HUF_repeat_none) {
+            /* reused the existing table */
+            hType = set_repeat;
+        }
+    }
+
+    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
+        memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }
+    if (cLitSize==1) {
+        memcpy(nextEntropy->hufCTable, prevEntropy->hufCTable, sizeof(prevEntropy->hufCTable));
+        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+    }
+
+    if (hType == set_compressed) {
+        /* using a newly constructed table */
+        nextEntropy->hufCTable_repeatMode = HUF_repeat_check;
+    }
+
+    /* Build header */
+    switch(lhSize)
+    {
+    case 3: /* 2 - 2 - 10 - 10 */
+        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+            MEM_writeLE24(ostart, lhc);
+            break;
+        }
+    case 4: /* 2 - 2 - 14 - 14 */
+        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+            MEM_writeLE32(ostart, lhc);
+            break;
+        }
+    case 5: /* 2 - 2 - 18 - 18 */
+        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+            MEM_writeLE32(ostart, lhc);
+            ostart[4] = (BYTE)(cLitSize >> 10);
+            break;
+        }
+    default:  /* not possible : lhSize is {3,4,5} */
+        assert(0);
+    }
+    return lhSize+cLitSize;
+}
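+
+/* Worked example of the 3-byte literals header above (illustrative) :
+ * with hType == set_compressed (2), two streams (singleStream == 0),
+ * srcSize == 1000 and cLitSize == 300, the "2 - 2 - 10 - 10" layout gives
+ * lhc == 2 + (1<<2) + (1000<<4) + (300<<14) == 0x4B3E86,
+ * written little-endian as bytes 86 3E 4B. */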
+
+
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+{
+    const seqDef* const sequences = seqStorePtr->sequencesStart;
+    BYTE* const llCodeTable = seqStorePtr->llCode;
+    BYTE* const ofCodeTable = seqStorePtr->ofCode;
+    BYTE* const mlCodeTable = seqStorePtr->mlCode;
+    U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    U32 u;
+    for (u=0; u<nbSeq; u++) {
+        U32 const llv = sequences[u].litLength;
+        U32 const mlv = sequences[u].matchLength;
+        llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
+        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+        mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
+    }
+    if (seqStorePtr->longLengthID==1)
+        llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+    if (seqStorePtr->longLengthID==2)
+        mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+}
+
+typedef enum {
+    ZSTD_defaultDisallowed = 0,
+    ZSTD_defaultAllowed = 1
+} ZSTD_defaultPolicy_e;
+
+MEM_STATIC
+symbolEncodingType_e ZSTD_selectEncodingType(
+        FSE_repeat* repeatMode, size_t const mostFrequent, size_t nbSeq,
+        U32 defaultNormLog, ZSTD_defaultPolicy_e const isDefaultAllowed)
+{
+#define MIN_SEQ_FOR_DYNAMIC_FSE   64
+#define MAX_SEQ_FOR_STATIC_FSE  1000
+    ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
+    if ((mostFrequent == nbSeq) && (!isDefaultAllowed || nbSeq > 2)) {
+        DEBUGLOG(5, "Selected set_rle");
+        /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
+         * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
+         * If basic encoding isn't possible, always choose RLE.
+         */
+        *repeatMode = FSE_repeat_check;
+        return set_rle;
+    }
+    if ( isDefaultAllowed
+      && (*repeatMode == FSE_repeat_valid) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+        DEBUGLOG(5, "Selected set_repeat");
+        return set_repeat;
+    }
+    if ( isDefaultAllowed
+      && ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (defaultNormLog-1)))) ) {
+        DEBUGLOG(5, "Selected set_basic");
+        /* The format allows default tables to be repeated, but it isn't useful.
+         * When using simple heuristics to select encoding type, we don't want
+         * to confuse these tables with dictionaries. When running more careful
+         * analysis, we don't need to waste time checking both repeating tables
+         * and default tables.
+         */
+        *repeatMode = FSE_repeat_none;
+        return set_basic;
+    }
+    DEBUGLOG(5, "Selected set_compressed");
+    *repeatMode = FSE_repeat_check;
+    return set_compressed;
+}
+
+MEM_STATIC
+size_t ZSTD_buildCTable(void* dst, size_t dstCapacity,
+        FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+        U32* count, U32 max,
+        BYTE const* codeTable, size_t nbSeq,
+        S16 const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
+        FSE_CTable const* prevCTable, size_t prevCTableSize,
+        void* workspace, size_t workspaceSize)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE const* const oend = op + dstCapacity;
+
+    switch (type) {
+    case set_rle:
+        *op = codeTable[0];
+        CHECK_F(FSE_buildCTable_rle(nextCTable, (BYTE)max));
+        return 1;
+    case set_repeat:
+        memcpy(nextCTable, prevCTable, prevCTableSize);
+        return 0;
+    case set_basic:
+        CHECK_F(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, workspace, workspaceSize));  /* note : could be pre-calculated */
+        return 0;
+    case set_compressed: {
+        S16 norm[MaxSeq + 1];
+        size_t nbSeq_1 = nbSeq;
+        const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
+        if (count[codeTable[nbSeq-1]] > 1) {
+            count[codeTable[nbSeq-1]]--;
+            nbSeq_1--;
+        }
+        assert(nbSeq_1 > 1);
+        CHECK_F(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
+        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
+            if (FSE_isError(NCountSize)) return NCountSize;
+            CHECK_F(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, workspace, workspaceSize));
+            return NCountSize;
+        }
+    }
+    default: return assert(0), ERROR(GENERIC);
+    }
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_encodeSequences_body(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    BIT_CStream_t blockStream;
+    FSE_CState_t  stateMatchLength;
+    FSE_CState_t  stateOffsetBits;
+    FSE_CState_t  stateLitLength;
+
+    CHECK_E(BIT_initCStream(&blockStream, dst, dstCapacity), dstSize_tooSmall); /* not enough space remaining */
+
+    /* first symbols */
+    FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+    FSE_initCState2(&stateOffsetBits,  CTable_OffsetBits,  ofCodeTable[nbSeq-1]);
+    FSE_initCState2(&stateLitLength,   CTable_LitLength,   llCodeTable[nbSeq-1]);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+    if (MEM_32bits()) BIT_flushBits(&blockStream);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+    if (MEM_32bits()) BIT_flushBits(&blockStream);
+    if (longOffsets) {
+        U32 const ofBits = ofCodeTable[nbSeq-1];
+        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+        if (extraBits) {
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+            BIT_flushBits(&blockStream);
+        }
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+                    ofBits - extraBits);
+    } else {
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+    }
+    BIT_flushBits(&blockStream);
+
+    {   size_t n;
+        for (n=nbSeq-2 ; n<nbSeq ; n--) {      /* intentional underflow */
+            BYTE const llCode = llCodeTable[n];
+            BYTE const ofCode = ofCodeTable[n];
+            BYTE const mlCode = mlCodeTable[n];
+            U32  const llBits = LL_bits[llCode];
+            U32  const ofBits = ofCode;
+            U32  const mlBits = ML_bits[mlCode];
+            DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
+                        sequences[n].litLength,
+                        sequences[n].matchLength + MINMATCH,
+                        sequences[n].offset);
+                                                                            /* 32b*/  /* 64b*/
+                                                                            /* (7)*/  /* (7)*/
+            FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
+            FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);      /* 24 */  /* 24 */
+            if (MEM_32bits()) BIT_flushBits(&blockStream);                  /* (7)*/
+            FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);        /* 16 */  /* 33 */
+            if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+                BIT_flushBits(&blockStream);                                /* (7)*/
+            BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+            if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+            if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
+            if (longOffsets) {
+                int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+                if (extraBits) {
+                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+                    BIT_flushBits(&blockStream);                            /* (7)*/
+                }
+                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+                            ofBits - extraBits);                            /* 31 */
+            } else {
+                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
+            }
+            BIT_flushBits(&blockStream);                                    /* (7)*/
+    }   }
+
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", stateMatchLength.stateLog);
+    FSE_flushCState(&blockStream, &stateMatchLength);
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog);
+    FSE_flushCState(&blockStream, &stateOffsetBits);
+    DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog);
+    FSE_flushCState(&blockStream, &stateLitLength);
+
+    {   size_t const streamSize = BIT_closeCStream(&blockStream);
+        if (streamSize==0) return ERROR(dstSize_tooSmall);   /* not enough space */
+        return streamSize;
+    }
+}
+
+static size_t
+ZSTD_encodeSequences_default(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    return ZSTD_encodeSequences_body(dst, dstCapacity,
+                                    CTable_MatchLength, mlCodeTable,
+                                    CTable_OffsetBits, ofCodeTable,
+                                    CTable_LitLength, llCodeTable,
+                                    sequences, nbSeq, longOffsets);
+}
+
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_encodeSequences_bmi2(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets)
+{
+    return ZSTD_encodeSequences_body(dst, dstCapacity,
+                                    CTable_MatchLength, mlCodeTable,
+                                    CTable_OffsetBits, ofCodeTable,
+                                    CTable_LitLength, llCodeTable,
+                                    sequences, nbSeq, longOffsets);
+}
+
+#endif
+
+size_t ZSTD_encodeSequences(
+            void* dst, size_t dstCapacity,
+            FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
+            FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
+            FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
+            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+{
+#if DYNAMIC_BMI2
+    if (bmi2) {
+        return ZSTD_encodeSequences_bmi2(dst, dstCapacity,
+                                         CTable_MatchLength, mlCodeTable,
+                                         CTable_OffsetBits, ofCodeTable,
+                                         CTable_LitLength, llCodeTable,
+                                         sequences, nbSeq, longOffsets);
+    }
+#endif
+    (void)bmi2;
+    return ZSTD_encodeSequences_default(dst, dstCapacity,
+                                        CTable_MatchLength, mlCodeTable,
+                                        CTable_OffsetBits, ofCodeTable,
+                                        CTable_LitLength, llCodeTable,
+                                        sequences, nbSeq, longOffsets);
+}
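+
+/* Note on the dispatch above : when DYNAMIC_BMI2 is enabled, the same
+ * FORCE_INLINE_TEMPLATE body is instantiated twice, once under
+ * TARGET_ATTRIBUTE("bmi2") and once without it, and the runtime `bmi2`
+ * flag selects between the two; otherwise the flag is ignored. */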
+
+MEM_STATIC size_t ZSTD_compressSequences_internal(seqStore_t* seqStorePtr,
+                              ZSTD_entropyCTables_t const* prevEntropy,
+                              ZSTD_entropyCTables_t* nextEntropy,
+                              ZSTD_CCtx_params const* cctxParams,
+                              void* dst, size_t dstCapacity, U32* workspace,
+                              const int bmi2)
+{
+    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
+    U32 count[MaxSeq+1];
+    FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
+    FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
+    FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
+    U32 LLtype, Offtype, MLtype;   /* compressed, raw or rle */
+    const seqDef* const sequences = seqStorePtr->sequencesStart;
+    const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+    const BYTE* const llCodeTable = seqStorePtr->llCode;
+    const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+    BYTE* seqHead;
+
+    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+
+    /* Compress literals */
+    {   const BYTE* const literals = seqStorePtr->litStart;
+        size_t const litSize = seqStorePtr->lit - literals;
+        size_t const cSize = ZSTD_compressLiterals(
+                                    prevEntropy, nextEntropy,
+                                    cctxParams->cParams.strategy, cctxParams->disableLiteralCompression,
+                                    op, dstCapacity,
+                                    literals, litSize,
+                                    workspace, bmi2);
+        if (ZSTD_isError(cSize))
+          return cSize;
+        assert(cSize <= dstCapacity);
+        op += cSize;
+    }
+
+    /* Sequences Header */
+    if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/) return ERROR(dstSize_tooSmall);
+    if (nbSeq < 0x7F)
+        *op++ = (BYTE)nbSeq;
+    else if (nbSeq < LONGNBSEQ)
+        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+    else
+        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+    if (nbSeq==0) {
+      memcpy(nextEntropy->litlengthCTable, prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable));
+      nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+      memcpy(nextEntropy->offcodeCTable, prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable));
+      nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+      memcpy(nextEntropy->matchlengthCTable, prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable));
+      nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+      return op - ostart;
+    }
+
+    /* seqHead : flags for FSE encoding type */
+    seqHead = op++;
+
+    /* convert length/distances into codes */
+    ZSTD_seqToCodes(seqStorePtr);
+    /* build CTable for Literal Lengths */
+    {   U32 max = MaxLL;
+        size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
+        DEBUGLOG(5, "Building LL table");
+        nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
+        LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, mostFrequent, nbSeq, LL_defaultNormLog, ZSTD_defaultAllowed);
+        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
+                    count, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
+                    prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
+                    workspace, HUF_WORKSPACE_SIZE);
+            if (ZSTD_isError(countSize)) return countSize;
+            op += countSize;
+    }   }
+    /* build CTable for Offsets */
+    {   U32 max = MaxOff;
+        size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
+        /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
+        ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
+        DEBUGLOG(5, "Building OF table");
+        nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
+        Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, mostFrequent, nbSeq, OF_defaultNormLog, defaultPolicy);
+        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
+                    count, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
+                    prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
+                    workspace, HUF_WORKSPACE_SIZE);
+            if (ZSTD_isError(countSize)) return countSize;
+            op += countSize;
+    }   }
+    /* build CTable for MatchLengths */
+    {   U32 max = MaxML;
+        size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
+        DEBUGLOG(5, "Building ML table");
+        nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
+        MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, mostFrequent, nbSeq, ML_defaultNormLog, ZSTD_defaultAllowed);
+        {   size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
+                    count, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
+                    prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
+                    workspace, HUF_WORKSPACE_SIZE);
+            if (ZSTD_isError(countSize)) return countSize;
+            op += countSize;
+    }   }
+
+    *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+
+    {   size_t const bitstreamSize = ZSTD_encodeSequences(
+                                        op, oend - op,
+                                        CTable_MatchLength, mlCodeTable,
+                                        CTable_OffsetBits, ofCodeTable,
+                                        CTable_LitLength, llCodeTable,
+                                        sequences, nbSeq,
+                                        longOffsets, bmi2);
+        if (ZSTD_isError(bitstreamSize)) return bitstreamSize;
+        op += bitstreamSize;
+    }
+
+    return op - ostart;
+}
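+
+/* Worked example of the sequences header above (illustrative,
+ * with LONGNBSEQ == 0x7F00) : nbSeq == 200 falls in [0x7F, LONGNBSEQ),
+ * so it is written as two bytes : op[0] == (200>>8) + 0x80 == 0x80 and
+ * op[1] == (BYTE)200 == 0xC8. */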
+
+MEM_STATIC size_t ZSTD_compressSequences(seqStore_t* seqStorePtr,
+                              ZSTD_entropyCTables_t const* prevEntropy,
+                              ZSTD_entropyCTables_t* nextEntropy,
+                              ZSTD_CCtx_params const* cctxParams,
+                              void* dst, size_t dstCapacity,
+                              size_t srcSize, U32* workspace, int bmi2)
+{
+    size_t const cSize = ZSTD_compressSequences_internal(
+            seqStorePtr, prevEntropy, nextEntropy, cctxParams, dst, dstCapacity,
+            workspace, bmi2);
+    /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
+     * Since we ran out of space, the block must not be compressible, so fall back to a raw uncompressed block.
+     */
+    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
+        return 0;  /* block not compressed */
+    if (ZSTD_isError(cSize)) return cSize;
+
+    /* Check compressibility */
+    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize);  /* note : fixed formula, maybe should depend on compression level, or strategy */
+        if (cSize >= maxCSize) return 0;  /* block not compressed */
+    }
+
+    /* We check that dictionaries have offset codes available for the first
+     * block. After the first block, the offcode table might not have large
+     * enough codes to represent the offsets in the data.
+     */
+    if (nextEntropy->offcode_repeatMode == FSE_repeat_valid)
+        nextEntropy->offcode_repeatMode = FSE_repeat_check;
+
+    return cSize;
+}
+
+/* ZSTD_selectBlockCompressor() :
+ * Not static, but internal use only (used by long distance matcher)
+ * assumption : strat is a valid strategy */
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+{
+    static const ZSTD_blockCompressor blockCompressor[2][(unsigned)ZSTD_btultra+1] = {
+        { ZSTD_compressBlock_fast  /* default for 0 */,
+          ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
+          ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
+          ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
+        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
+          ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
+          ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
+          ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
+    };
+    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
+
+    assert((U32)strat >= (U32)ZSTD_fast);
+    assert((U32)strat <= (U32)ZSTD_btultra);
+    return blockCompressor[extDict!=0][(U32)strat];
+}
+
+static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
+                                   const BYTE* anchor, size_t lastLLSize)
+{
+    memcpy(seqStorePtr->lit, anchor, lastLLSize);
+    seqStorePtr->lit += lastLLSize;
+}
+
+static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+{
+    ssPtr->lit = ssPtr->litStart;
+    ssPtr->sequences = ssPtr->sequencesStart;
+    ssPtr->longLengthID = 0;
+}
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+                                        void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize)
+{
+    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
+    DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
+                (U32)dstCapacity, ms->window.dictLimit, ms->nextToUpdate);
+    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
+        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.searchLength);
+        return 0;   /* don't even attempt compression below a certain srcSize */
+    }
+    ZSTD_resetSeqStore(&(zc->seqStore));
+
+    /* limited update after a very long match */
+    {   const BYTE* const base = ms->window.base;
+        const BYTE* const istart = (const BYTE*)src;
+        const U32 current = (U32)(istart-base);
+        if (current > ms->nextToUpdate + 384)
+            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
+    }
+
+    /* select and store sequences */
+    {   U32 const extDict = ZSTD_window_hasExtDict(ms->window);
+        size_t lastLLSize;
+        {   int i;
+            for (i = 0; i < ZSTD_REP_NUM; ++i)
+                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
+        }
+        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
+            assert(!zc->appliedParams.ldmParams.enableLdm);
+            /* Updates ldmSeqStore.pos */
+            lastLLSize =
+                ZSTD_ldm_blockCompress(&zc->externSeqStore,
+                                       ms, &zc->seqStore,
+                                       zc->blockState.nextCBlock->rep,
+                                       &zc->appliedParams.cParams,
+                                       src, srcSize, extDict);
+            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
+        } else if (zc->appliedParams.ldmParams.enableLdm) {
+            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};
+
+            ldmSeqStore.seq = zc->ldmSequences;
+            ldmSeqStore.capacity = zc->maxNbLdmSequences;
+            /* Updates ldmSeqStore.size */
+            CHECK_F(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
+                                               &zc->appliedParams.ldmParams,
+                                               src, srcSize));
+            /* Updates ldmSeqStore.pos */
+            lastLLSize =
+                ZSTD_ldm_blockCompress(&ldmSeqStore,
+                                       ms, &zc->seqStore,
+                                       zc->blockState.nextCBlock->rep,
+                                       &zc->appliedParams.cParams,
+                                       src, srcSize, extDict);
+            assert(ldmSeqStore.pos == ldmSeqStore.size);
+        } else {   /* not long range mode */
+            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, extDict);
+            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, &zc->appliedParams.cParams, src, srcSize);
+        }
+        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
+            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
+    }   }
+
+    /* encode sequences and literals */
+    {   size_t const cSize = ZSTD_compressSequences(&zc->seqStore,
+                                &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
+                                &zc->appliedParams,
+                                dst, dstCapacity,
+                                srcSize, zc->entropyWorkspace, zc->bmi2);
+        if (ZSTD_isError(cSize) || cSize == 0) return cSize;
+        /* confirm repcodes and entropy tables */
+        {   ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+            zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+            zc->blockState.nextCBlock = tmp;
+        }
+        return cSize;
+    }
+}
+
+
+/*! ZSTD_compress_frameChunk() :
+*   Compress a chunk of data into one or multiple blocks.
+*   All blocks will be terminated, all input will be consumed.
+*   The function will return an error if there is not enough `dstCapacity` to hold the compressed content.
+*   The frame is assumed to be already started (header already produced)
+*   @return : compressed size, or an error code
+*/
+static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx,
+                                     void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize,
+                                     U32 lastFrameChunk)
+{
+    size_t blockSize = cctx->blockSize;
+    size_t remaining = srcSize;
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* op = ostart;
+    U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
+    assert(cctx->appliedParams.cParams.windowLog <= 31);
+
+    DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (U32)blockSize);
+    if (cctx->appliedParams.fParams.checksumFlag && srcSize)
+        XXH64_update(&cctx->xxhState, src, srcSize);
+
+    while (remaining) {
+        ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
+        U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+
+        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
+            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
+        if (remaining < blockSize) blockSize = remaining;
+
+        if (ZSTD_window_needOverflowCorrection(ms->window, ip + blockSize)) {
+            U32 const cycleLog = ZSTD_cycleLog(cctx->appliedParams.cParams.chainLog, cctx->appliedParams.cParams.strategy);
+            U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
+            ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
+            ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
+
+            ZSTD_reduceIndex(cctx, correction);
+            if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
+            else ms->nextToUpdate -= correction;
+            ms->loadedDictEnd = 0;
+        }
+        ZSTD_window_enforceMaxDist(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd);
+        if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
+
+        {   size_t cSize = ZSTD_compressBlock_internal(cctx,
+                                op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
+                                ip, blockSize);
+            if (ZSTD_isError(cSize)) return cSize;
+
+            if (cSize == 0) {  /* block is not compressible */
+                U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
+                if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+                MEM_writeLE32(op, cBlockHeader24);   /* 4th byte will be overwritten */
+                memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
+                cSize = ZSTD_blockHeaderSize + blockSize;
+            } else {
+                U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+                MEM_writeLE24(op, cBlockHeader24);
+                cSize += ZSTD_blockHeaderSize;
+            }
+
+            ip += blockSize;
+            assert(remaining >= blockSize);
+            remaining -= blockSize;
+            op += cSize;
+            assert(dstCapacity >= cSize);
+            dstCapacity -= cSize;
+            DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
+                        (U32)cSize);
+    }   }
+
+    if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+    return op-ostart;
+}
+
+
+static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+                                    ZSTD_CCtx_params params, U64 pledgedSrcSize, U32 dictID)
+{   BYTE* const op = (BYTE*)dst;
+    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
+    U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
+    U32   const checksumFlag = params.fParams.checksumFlag>0;
+    U32   const windowSize = (U32)1 << params.cParams.windowLog;
+    U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
+    BYTE  const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+    U32   const fcsCode = params.fParams.contentSizeFlag ?
+                     (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0;  /* 0-3 */
+    BYTE  const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+    size_t pos=0;
+
+    if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
+    DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
+                !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
+
+    if (params.format == ZSTD_f_zstd1) {
+        MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+        pos = 4;
+    }
+    op[pos++] = frameHeaderDescriptionByte;
+    if (!singleSegment) op[pos++] = windowLogByte;
+    switch(dictIDSizeCode)
+    {
+        default:  assert(0); /* impossible */
+        case 0 : break;
+        case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+        case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+        case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+    }
+    switch(fcsCode)
+    {
+        default:  assert(0); /* impossible */
+        case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+        case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+        case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+        case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+    }
+    return pos;
+}
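+
+/* Worked example of the frame header descriptor byte above (illustrative) :
+ * with dictIDSizeCode == 2 (16-bit dictID), checksumFlag == 1,
+ * singleSegment == 0 and fcsCode == 1 (2-byte content size field),
+ * the descriptor is 2 + (1<<2) + (0<<5) + (1<<6) == 0x46. */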
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
+{
+    if (dstCapacity < ZSTD_blockHeaderSize) return ERROR(dstSize_tooSmall);
+    {   U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1);  /* 0 size */
+        MEM_writeLE24(dst, cBlockHeader24);
+        return ZSTD_blockHeaderSize;
+    }
+}
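+
+/* Worked example (illustrative) : the 3-byte block header written above is
+ * lastBlock (1 bit) + blockType (2 bits, bt_raw == 0) + blockSize (21 bits),
+ * so the last empty raw block encodes as 1 + (0<<1) + (0<<3) == 0x000001,
+ * i.e. bytes 01 00 00 in little-endian order. */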
+
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
+{
+    if (cctx->stage != ZSTDcs_init)
+        return ERROR(stage_wrong);
+    if (cctx->appliedParams.ldmParams.enableLdm)
+        return ERROR(parameter_unsupported);
+    cctx->externSeqStore.seq = seq;
+    cctx->externSeqStore.size = nbSeq;
+    cctx->externSeqStore.capacity = nbSeq;
+    cctx->externSeqStore.pos = 0;
+    return 0;
+}
+
+
+static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize,
+                               U32 frame, U32 lastFrameChunk)
+{
+    ZSTD_matchState_t* ms = &cctx->blockState.matchState;
+    size_t fhSize = 0;
+
+    DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
+                cctx->stage, (U32)srcSize);
+    if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong);   /* missing init (ZSTD_compressBegin) */
+
+    if (frame && (cctx->stage==ZSTDcs_init)) {
+        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams,
+                                       cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
+        if (ZSTD_isError(fhSize)) return fhSize;
+        dstCapacity -= fhSize;
+        dst = (char*)dst + fhSize;
+        cctx->stage = ZSTDcs_ongoing;
+    }
+
+    if (!srcSize) return fhSize;  /* do not generate an empty block if no input */
+
+    if (!ZSTD_window_update(&ms->window, src, srcSize)) {
+        ms->nextToUpdate = ms->window.dictLimit;
+    }
+    if (cctx->appliedParams.ldmParams.enableLdm)
+        ZSTD_window_update(&cctx->ldmState.window, src, srcSize);
+
+    DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (U32)cctx->blockSize);
+    {   size_t const cSize = frame ?
+                             ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+                             ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
+        if (ZSTD_isError(cSize)) return cSize;
+        cctx->consumedSrcSize += srcSize;
+        cctx->producedCSize += (cSize + fhSize);
+        if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
+            if (cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne) {
+                DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize >= %u",
+                    (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+                return ERROR(srcSize_wrong);
+            }
+        }
+        return cSize + fhSize;
+    }
+}
+
+size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+                              void* dst, size_t dstCapacity,
+                        const void* src, size_t srcSize)
+{
+    DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (U32)srcSize);
+    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
+}
+
+
+size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
+{
+    ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
+    assert(!ZSTD_checkCParams(cParams));
+    return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog);
+}
+
+size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t const blockSizeMax = ZSTD_getBlockSize(cctx);
+    if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
+}
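+
+/* Illustrative block-level usage sketch (not part of the library; assumes
+ * the caller supplies `in`/`inSize` and tracks block boundaries itself) :
+ *
+ *     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
+ *     char out[ZSTD_BLOCKSIZE_MAX];
+ *     size_t cSize = ZSTD_compressBegin(cctx, 1);   // compression level 1
+ *     if (!ZSTD_isError(cSize))
+ *         cSize = ZSTD_compressBlock(cctx, out, sizeof(out), in, inSize);
+ *     // cSize == 0 means "not compressible" : the caller must store the
+ *     // input raw, as no block header is emitted at this level.
+ *     ZSTD_freeCCtx(cctx);
+ */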
+
+/*! ZSTD_loadDictionaryContent() :
+ *  @return : 0, or an error code
+ */
+static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* src, size_t srcSize)
+{
+    const BYTE* const ip = (const BYTE*) src;
+    const BYTE* const iend = ip + srcSize;
+    ZSTD_compressionParameters const* cParams = &params->cParams;
+
+    ZSTD_window_update(&ms->window, src, srcSize);
+    ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
+
+    if (srcSize <= HASH_READ_SIZE) return 0;
+
+    switch(params->cParams.strategy)
+    {
+    case ZSTD_fast:
+        ZSTD_fillHashTable(ms, cParams, iend);
+        break;
+    case ZSTD_dfast:
+        ZSTD_fillDoubleHashTable(ms, cParams, iend);
+        break;
+
+    case ZSTD_greedy:
+    case ZSTD_lazy:
+    case ZSTD_lazy2:
+        if (srcSize >= HASH_READ_SIZE)
+            ZSTD_insertAndFindFirstIndex(ms, cParams, iend-HASH_READ_SIZE);
+        break;
+
+    case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
+    case ZSTD_btopt:
+    case ZSTD_btultra:
+        if (srcSize >= HASH_READ_SIZE)
+            ZSTD_updateTree(ms, cParams, iend-HASH_READ_SIZE, iend);
+        break;
+
+    default:
+        assert(0);  /* not possible : not a valid strategy id */
+    }
+
+    ms->nextToUpdate = (U32)(iend - ms->window.base);
+    return 0;
+}
+
+
+/* Dictionaries that assign zero probability to symbols that show up cause problems
+   during FSE encoding.  Refuse dictionaries that assign zero probability to symbols
+   that we may encounter during compression.
+   NOTE: This behavior is not standard and could be improved in the future. */
+static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
+    U32 s;
+    if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
+    for (s = 0; s <= maxSymbolValue; ++s) {
+        if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
+    }
+    return 0;
+}
+
+
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+ */
+/*! ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ *  assumptions : magic number already checked by caller,
+ *                dictSize > 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, void* workspace)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+    short offcodeNCount[MaxOff+1];
+    unsigned offcodeMaxValue = MaxOff;
+    size_t dictID;
+
+    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+
+    dictPtr += 4;   /* skip magic number */
+    dictID = params->fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
+    dictPtr += 4;
+
+    {   unsigned maxSymbolValue = 255;
+        size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.hufCTable, &maxSymbolValue, dictPtr, dictEnd-dictPtr);
+        if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+        if (maxSymbolValue < 255) return ERROR(dictionary_corrupted);
+        dictPtr += hufHeaderSize;
+    }
+
+    {   unsigned offcodeLog;
+        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, workspace, HUF_WORKSPACE_SIZE),
+                 dictionary_corrupted);
+        dictPtr += offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        /* Every match length code must have non-zero probability */
+        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, workspace, HUF_WORKSPACE_SIZE),
+                 dictionary_corrupted);
+        dictPtr += matchlengthHeaderSize;
+    }
+
+    {   short litlengthNCount[MaxLL+1];
+        unsigned litlengthMaxValue = MaxLL, litlengthLog;
+        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        /* Every literal length code must have non-zero probability */
+        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+        CHECK_E( FSE_buildCTable_wksp(bs->entropy.litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, workspace, HUF_WORKSPACE_SIZE),
+                 dictionary_corrupted);
+        dictPtr += litlengthHeaderSize;
+    }
+
+    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    bs->rep[0] = MEM_readLE32(dictPtr+0);
+    bs->rep[1] = MEM_readLE32(dictPtr+4);
+    bs->rep[2] = MEM_readLE32(dictPtr+8);
+    dictPtr += 12;
+
+    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
+        U32 offcodeMax = MaxOff;
+        if (dictContentSize <= ((U32)-1) - 128 KB) {
+            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
+            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
+        }
+        /* All offset values <= dictContentSize + 128 KB must be representable */
+        CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+        /* All repCodes must be <= dictContentSize and != 0 */
+        {   U32 u;
+            for (u=0; u<3; u++) {
+                if (bs->rep[u] == 0) return ERROR(dictionary_corrupted);
+                if (bs->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
+        }   }
+
+        bs->entropy.hufCTable_repeatMode = HUF_repeat_valid;
+        bs->entropy.offcode_repeatMode = FSE_repeat_valid;
+        bs->entropy.matchlength_repeatMode = FSE_repeat_valid;
+        bs->entropy.litlength_repeatMode = FSE_repeat_valid;
+        CHECK_F(ZSTD_loadDictionaryContent(ms, params, dictPtr, dictContentSize));
+        return dictID;
+    }
+}
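+
+/* Summary of the dictionary layout parsed above, for reference :
+ *   magic number (4 bytes) | dictID (4 bytes) | HUF literals CTable |
+ *   FSE offcode NCount | FSE matchLength NCount | FSE litLength NCount |
+ *   rep[0..2] (3 x 4 bytes, little-endian) | dictionary content. */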
+
+/** ZSTD_compress_insertDictionary() :
+*   @return : dictID, or an error code */
+static size_t ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, ZSTD_matchState_t* ms,
+                                             ZSTD_CCtx_params const* params,
+                                       const void* dict, size_t dictSize,
+                                             ZSTD_dictContentType_e dictContentType,
+                                             void* workspace)
+{
+    DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
+    if ((dict==NULL) || (dictSize<=8)) return 0;
+
+    ZSTD_reset_compressedBlockState(bs);
+
+    /* dict restricted modes */
+    if (dictContentType == ZSTD_dct_rawContent)
+        return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+
+    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
+        if (dictContentType == ZSTD_dct_auto) {
+            DEBUGLOG(4, "raw content dictionary detected");
+            return ZSTD_loadDictionaryContent(ms, params, dict, dictSize);
+        }
+        if (dictContentType == ZSTD_dct_fullDict)
+            return ERROR(dictionary_wrong);
+        assert(0);   /* impossible */
+    }
+
+    /* dict as full zstd dictionary */
+    return ZSTD_loadZstdDictionary(bs, ms, params, dict, dictSize, workspace);
+}
+
+/*! ZSTD_compressBegin_internal() :
+ * @return : 0, or an error code */
+size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+                             const void* dict, size_t dictSize,
+                             ZSTD_dictContentType_e dictContentType,
+                             const ZSTD_CDict* cdict,
+                             ZSTD_CCtx_params params, U64 pledgedSrcSize,
+                             ZSTD_buffered_policy_e zbuff)
+{
+    DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params.cParams.windowLog);
+    /* params are supposed to be fully validated at this point */
+    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
+
+    if (cdict && cdict->dictContentSize>0) {
+        cctx->requestedParams = params;
+        return ZSTD_resetCCtx_usingCDict(cctx, cdict, params.cParams.windowLog,
+                                         params.fParams, pledgedSrcSize, zbuff);
+    }
+
+    CHECK_F( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
+                                     ZSTDcrp_continue, zbuff) );
+    {
+        size_t const dictID = ZSTD_compress_insertDictionary(
+                cctx->blockState.prevCBlock, &cctx->blockState.matchState,
+                &params, dict, dictSize, dictContentType, cctx->entropyWorkspace);
+        if (ZSTD_isError(dictID)) return dictID;
+        assert(dictID <= (size_t)(U32)-1);
+        cctx->dictID = (U32)dictID;
+    }
+    return 0;
+}
+
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+                                    const void* dict, size_t dictSize,
+                                    ZSTD_dictContentType_e dictContentType,
+                                    const ZSTD_CDict* cdict,
+                                    ZSTD_CCtx_params params,
+                                    unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params.cParams.windowLog);
+    /* compression parameters verification and optimization */
+    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    return ZSTD_compressBegin_internal(cctx,
+                                       dict, dictSize, dictContentType,
+                                       cdict,
+                                       params, pledgedSrcSize,
+                                       ZSTDb_not_buffered);
+}
+
+/*! ZSTD_compressBegin_advanced() :
+*   @return : 0, or an error code */
+size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+                             const void* dict, size_t dictSize,
+                                   ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    ZSTD_CCtx_params const cctxParams =
+            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+    return ZSTD_compressBegin_advanced_internal(cctx,
+                                            dict, dictSize, ZSTD_dct_auto,
+                                            NULL /*cdict*/,
+                                            cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize);
+    ZSTD_CCtx_params const cctxParams =
+            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+    DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (U32)dictSize);
+    return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
+                                       cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
+}
+
+size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
+{
+    return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel);
+}
+
+
+/*! ZSTD_writeEpilogue() :
+*   Ends a frame.
+*   @return : nb of bytes written into dst (or an error code) */
+static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* op = ostart;
+    size_t fhSize = 0;
+
+    DEBUGLOG(4, "ZSTD_writeEpilogue");
+    if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
+
+    /* special case : empty frame */
+    if (cctx->stage == ZSTDcs_init) {
+        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->appliedParams, 0, 0);
+        if (ZSTD_isError(fhSize)) return fhSize;
+        dstCapacity -= fhSize;
+        op += fhSize;
+        cctx->stage = ZSTDcs_ongoing;
+    }
+
+    if (cctx->stage != ZSTDcs_ending) {
+        /* write one last empty block, make it the "last" block */
+        U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        MEM_writeLE32(op, cBlockHeader24);
+        op += ZSTD_blockHeaderSize;
+        dstCapacity -= ZSTD_blockHeaderSize;
+    }
+
+    if (cctx->appliedParams.fParams.checksumFlag) {
+        U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
+        if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+        DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", checksum);
+        MEM_writeLE32(op, checksum);
+        op += 4;
+    }
+
+    cctx->stage = ZSTDcs_created;  /* return to "created but no init" status */
+    return op-ostart;
+}
+
+size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+                         void* dst, size_t dstCapacity,
+                   const void* src, size_t srcSize)
+{
+    size_t endResult;
+    size_t const cSize = ZSTD_compressContinue_internal(cctx,
+                                dst, dstCapacity, src, srcSize,
+                                1 /* frame mode */, 1 /* last chunk */);
+    if (ZSTD_isError(cSize)) return cSize;
+    endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+    if (ZSTD_isError(endResult)) return endResult;
+    if (cctx->appliedParams.fParams.contentSizeFlag) {  /* control src size */
+        DEBUGLOG(4, "end of frame : controlling src size");
+        if (cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1) {
+            DEBUGLOG(4, "error : pledgedSrcSize = %u, while realSrcSize = %u",
+                (U32)cctx->pledgedSrcSizePlusOne-1, (U32)cctx->consumedSrcSize);
+            return ERROR(srcSize_wrong);
+    }   }
+    return cSize + endResult;
+}
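+
+/* Usage sketch for the begin/end API above : a minimal single-shot frame,
+ * assuming dstCapacity >= ZSTD_compressBound(srcSize). Illustrative only,
+ * not compiled into the library; the `example_` helper is hypothetical. */
+#if 0
+static size_t example_bufferlessFrame(ZSTD_CCtx* cctx,
+                                      void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize)
+{
+    size_t const initErr = ZSTD_compressBegin(cctx, 3 /* compression level */);
+    if (ZSTD_isError(initErr)) return initErr;
+    /* ZSTD_compressEnd() compresses the last chunk and writes the epilogue */
+    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+#endif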
+
+
+static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const void* dict,size_t dictSize,
+                               ZSTD_parameters params)
+{
+    ZSTD_CCtx_params const cctxParams =
+            ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+    DEBUGLOG(4, "ZSTD_compress_internal");
+    return ZSTD_compress_advanced_internal(cctx,
+                                          dst, dstCapacity,
+                                          src, srcSize,
+                                          dict, dictSize,
+                                          cctxParams);
+}
+
+size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const void* dict,size_t dictSize,
+                               ZSTD_parameters params)
+{
+    DEBUGLOG(4, "ZSTD_compress_advanced");
+    CHECK_F(ZSTD_checkCParams(params.cParams));
+    return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+}
+
+/* Internal */
+size_t ZSTD_compress_advanced_internal(
+        ZSTD_CCtx* cctx,
+        void* dst, size_t dstCapacity,
+        const void* src, size_t srcSize,
+        const void* dict,size_t dictSize,
+        ZSTD_CCtx_params params)
+{
+    DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)",
+                (U32)srcSize);
+    CHECK_F( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, NULL,
+                                         params, srcSize, ZSTDb_not_buffered) );
+    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
+                               const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, srcSize ? srcSize : 1, dict ? dictSize : 0);
+    ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(cctx->requestedParams, params);
+    assert(params.fParams.contentSizeFlag == 1);
+    ZSTD_CCtxParam_setParameter(&cctxParams, ZSTD_p_compressLiterals, compressionLevel>=0);
+    return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, cctxParams);
+}
+
+size_t ZSTD_compressCCtx (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+{
+    DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (U32)srcSize);
+    return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+}
+
+size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+{
+    size_t result;
+    ZSTD_CCtx ctxBody;
+    memset(&ctxBody, 0, sizeof(ctxBody));
+    ctxBody.customMem = ZSTD_defaultCMem;
+    result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
+    ZSTD_free(ctxBody.workSpace, ZSTD_defaultCMem);  /* can't free ctxBody itself, as it's on stack; free only heap content */
+    return result;
+}
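+
+/* Usage sketch for the one-shot API above. The destination buffer must be
+ * sized with ZSTD_compressBound() to guarantee compression can succeed.
+ * Illustrative only, not compiled; error handling is condensed. */
+#if 0
+static size_t example_compressOnce(void* dst, size_t dstCapacity,
+                             const void* src, size_t srcSize)
+{
+    /* level 0 is remapped to ZSTD_CLEVEL_DEFAULT inside ZSTD_getCParams() */
+    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 0);
+    if (ZSTD_isError(cSize)) return 0;   /* sketch : signal failure with 0 */
+    return cSize;   /* dst now holds a complete zstd frame of cSize bytes */
+}
+#endif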
+
+
+/* =====  Dictionary API  ===== */
+
+/*! ZSTD_estimateCDictSize_advanced() :
+ *  Estimate the amount of memory needed to create a dictionary with the following arguments */
+size_t ZSTD_estimateCDictSize_advanced(
+        size_t dictSize, ZSTD_compressionParameters cParams,
+        ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+    DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (U32)sizeof(ZSTD_CDict));
+    return sizeof(ZSTD_CDict) + HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0)
+           + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
+{
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
+    return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+}
+
+size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+{
+    if (cdict==NULL) return 0;   /* support sizeof on NULL */
+    DEBUGLOG(5, "sizeof(*cdict) : %u", (U32)sizeof(*cdict));
+    return cdict->workspaceSize + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
+}
+
+static size_t ZSTD_initCDict_internal(
+                    ZSTD_CDict* cdict,
+              const void* dictBuffer, size_t dictSize,
+                    ZSTD_dictLoadMethod_e dictLoadMethod,
+                    ZSTD_dictContentType_e dictContentType,
+                    ZSTD_compressionParameters cParams)
+{
+    DEBUGLOG(3, "ZSTD_initCDict_internal, dictContentType %u", (U32)dictContentType);
+    assert(!ZSTD_checkCParams(cParams));
+    cdict->cParams = cParams;
+    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
+        cdict->dictBuffer = NULL;
+        cdict->dictContent = dictBuffer;
+    } else {
+        void* const internalBuffer = ZSTD_malloc(dictSize, cdict->customMem);
+        cdict->dictBuffer = internalBuffer;
+        cdict->dictContent = internalBuffer;
+        if (!internalBuffer) return ERROR(memory_allocation);
+        memcpy(internalBuffer, dictBuffer, dictSize);
+    }
+    cdict->dictContentSize = dictSize;
+
+    /* Reset the state to no dictionary */
+    ZSTD_reset_compressedBlockState(&cdict->cBlockState);
+    {   void* const end = ZSTD_reset_matchState(
+                &cdict->matchState,
+                (U32*)cdict->workspace + HUF_WORKSPACE_SIZE_U32,
+                &cParams, ZSTDcrp_continue, /* forCCtx */ 0);
+        assert(end == (char*)cdict->workspace + cdict->workspaceSize);
+        (void)end;
+    }
+    /* (Maybe) load the dictionary
+     * Skips loading the dictionary if it is <= 8 bytes.
+     */
+    {   ZSTD_CCtx_params params;
+        memset(&params, 0, sizeof(params));
+        params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
+        params.fParams.contentSizeFlag = 1;
+        params.cParams = cParams;
+        {   size_t const dictID = ZSTD_compress_insertDictionary(
+                    &cdict->cBlockState, &cdict->matchState, &params,
+                    cdict->dictContent, cdict->dictContentSize,
+                    dictContentType, cdict->workspace);
+            if (ZSTD_isError(dictID)) return dictID;
+            assert(dictID <= (size_t)(U32)-1);
+            cdict->dictID = (U32)dictID;
+        }
+    }
+
+    return 0;
+}
+
+ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType,
+                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
+{
+    DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (U32)dictContentType);
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+    {   ZSTD_CDict* const cdict = (ZSTD_CDict*)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
+        size_t const workspaceSize = HUF_WORKSPACE_SIZE + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+        void* const workspace = ZSTD_malloc(workspaceSize, customMem);
+
+        if (!cdict || !workspace) {
+            ZSTD_free(cdict, customMem);
+            ZSTD_free(workspace, customMem);
+            return NULL;
+        }
+        cdict->customMem = customMem;
+        cdict->workspace = workspace;
+        cdict->workspaceSize = workspaceSize;
+        if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+                                        dictBuffer, dictSize,
+                                        dictLoadMethod, dictContentType,
+                                        cParams) )) {
+            ZSTD_freeCDict(cdict);
+            return NULL;
+        }
+
+        return cdict;
+    }
+}
+
+ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
+    return ZSTD_createCDict_advanced(dict, dictSize,
+                                     ZSTD_dlm_byCopy, ZSTD_dct_auto,
+                                     cParams, ZSTD_defaultCMem);
+}
+
+ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
+    return ZSTD_createCDict_advanced(dict, dictSize,
+                                     ZSTD_dlm_byRef, ZSTD_dct_auto,
+                                     cParams, ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+{
+    if (cdict==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = cdict->customMem;
+        ZSTD_free(cdict->workspace, cMem);
+        ZSTD_free(cdict->dictBuffer, cMem);
+        ZSTD_free(cdict, cMem);
+        return 0;
+    }
+}
+
+/*! ZSTD_initStaticCDict_advanced() :
+ *  Generate a digested dictionary in provided memory area.
+ *  workspace: The memory area to emplace the dictionary into.
+ *             The provided pointer must be 8-byte aligned.
+ *             It must outlive dictionary usage.
+ *  workspaceSize: Use ZSTD_estimateCDictSize()
+ *                 to determine how large the workspace must be.
+ *  cParams : use ZSTD_getCParams() to transform a compression level
+ *            into its relevant cParams.
+ * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
+ *  Note : there is no corresponding "free" function.
+ *         Since workspace was allocated externally, it must be freed externally.
+ */
+const ZSTD_CDict* ZSTD_initStaticCDict(
+                                 void* workspace, size_t workspaceSize,
+                           const void* dict, size_t dictSize,
+                                 ZSTD_dictLoadMethod_e dictLoadMethod,
+                                 ZSTD_dictContentType_e dictContentType,
+                                 ZSTD_compressionParameters cParams)
+{
+    size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0);
+    size_t const neededSize = sizeof(ZSTD_CDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize)
+                            + HUF_WORKSPACE_SIZE + matchStateSize;
+    ZSTD_CDict* const cdict = (ZSTD_CDict*) workspace;
+    void* ptr;
+    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
+    DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
+        (U32)workspaceSize, (U32)neededSize, (U32)(workspaceSize < neededSize));
+    if (workspaceSize < neededSize) return NULL;
+
+    if (dictLoadMethod == ZSTD_dlm_byCopy) {
+        memcpy(cdict+1, dict, dictSize);
+        dict = cdict+1;
+        ptr = (char*)workspace + sizeof(ZSTD_CDict) + dictSize;
+    } else {
+        ptr = cdict+1;
+    }
+    cdict->workspace = ptr;
+    cdict->workspaceSize = HUF_WORKSPACE_SIZE + matchStateSize;
+
+    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+                                              dict, dictSize,
+                                              ZSTD_dlm_byRef, dictContentType,
+                                              cParams) ))
+        return NULL;
+
+    return cdict;
+}
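+
+/* Usage sketch for ZSTD_initStaticCDict() above : the caller sizes the
+ * workspace with ZSTD_estimateCDictSize_advanced() and keeps it alive for as
+ * long as the returned CDict is in use. Illustrative only, not compiled;
+ * 8-byte alignment of `wksp` is assumed to be the caller's responsibility. */
+#if 0
+static const ZSTD_CDict* example_staticCDict(void* wksp, size_t wkspSize,
+                                       const void* dict, size_t dictSize, int level)
+{
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(level, 0, dictSize);
+    size_t const needed = ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
+    if (wkspSize < needed) return NULL;
+    return ZSTD_initStaticCDict(wksp, wkspSize, dict, dictSize,
+                                ZSTD_dlm_byCopy, ZSTD_dct_auto, cParams);
+}
+#endif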
+
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
+{
+    assert(cdict != NULL);
+    return cdict->cParams;
+}
+
+/* ZSTD_compressBegin_usingCDict_advanced() :
+ * cdict must be != NULL */
+size_t ZSTD_compressBegin_usingCDict_advanced(
+    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
+    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced");
+    if (cdict==NULL) return ERROR(dictionary_wrong);
+    {   ZSTD_CCtx_params params = cctx->requestedParams;
+        params.cParams = ZSTD_getCParamsFromCDict(cdict);
+        /* Increase window log to fit the entire dictionary and source if the
+         * source size is known. Limit the increase to 19, which is the
+         * window log for compression level 1 with the largest source size.
+         */
+        if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+            U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
+            U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
+            params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
+        }
+        params.fParams = fParams;
+        return ZSTD_compressBegin_internal(cctx,
+                                           NULL, 0, ZSTD_dct_auto,
+                                           cdict,
+                                           params, pledgedSrcSize,
+                                           ZSTDb_not_buffered);
+    }
+}
+
+/* ZSTD_compressBegin_usingCDict() :
+ * pledgedSrcSize=0 means "unknown"
+ * if pledgedSrcSize>0, it will enable contentSizeFlag */
+size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
+{
+    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+    DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
+    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
+}
+
+size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize,
+                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
+{
+    CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
+    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+}
+
+/*! ZSTD_compress_usingCDict() :
+ *  Compression using a digested Dictionary.
+ *  Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+ *  Note that compression parameters are decided at CDict creation time
+ *  while frame parameters are hardcoded */
+size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const void* src, size_t srcSize,
+                                const ZSTD_CDict* cdict)
+{
+    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
+    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
+}
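+
+/* Usage sketch for the CDict path above : digest the dictionary once, then
+ * reuse it for any number of frames. Illustrative only, not compiled;
+ * error handling is condensed. */
+#if 0
+static size_t example_compressWithDict(void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize,
+                                 const void* dictBuf, size_t dictSize)
+{
+    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictSize, 3 /* level */);
+    ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
+    size_t cSize = ERROR(memory_allocation);
+    if (cdict && cctx)
+        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
+    ZSTD_freeCCtx(cctx);
+    ZSTD_freeCDict(cdict);
+    return cSize;
+}
+#endif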
+
+
+
+/* ******************************************************************
+*  Streaming
+********************************************************************/
+
+ZSTD_CStream* ZSTD_createCStream(void)
+{
+    DEBUGLOG(3, "ZSTD_createCStream");
+    return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
+{
+    return ZSTD_initStaticCCtx(workspace, workspaceSize);
+}
+
+ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+{   /* CStream and CCtx are now the same object */
+    return ZSTD_createCCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+{
+    return ZSTD_freeCCtx(zcs);   /* same object */
+}
+
+
+
+/*======   Initialization   ======*/
+
+size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_CStreamOutSize(void)
+{
+    return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bit hash */;
+}
+
+static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx,
+                    const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType,
+                    const ZSTD_CDict* const cdict,
+                    ZSTD_CCtx_params const params, unsigned long long const pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_resetCStream_internal (disableLiteralCompression=%i)",
+                params.disableLiteralCompression);
+    /* params are supposed to be fully validated at this point */
+    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
+
+    CHECK_F( ZSTD_compressBegin_internal(cctx,
+                                         dict, dictSize, dictContentType,
+                                         cdict,
+                                         params, pledgedSrcSize,
+                                         ZSTDb_buffered) );
+
+    cctx->inToCompress = 0;
+    cctx->inBuffPos = 0;
+    cctx->inBuffTarget = cctx->blockSize
+                      + (cctx->blockSize == pledgedSrcSize);   /* for small input: avoid automatic flush on reaching end of block, since it would require adding a 3-byte null block to end the frame */
+    cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
+    cctx->streamStage = zcss_load;
+    cctx->frameEnded = 0;
+    return 0;   /* ready to go */
+}
+
+/* ZSTD_resetCStream():
+ * pledgedSrcSize == 0 means "unknown" */
+size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
+{
+    ZSTD_CCtx_params params = zcs->requestedParams;
+    DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (U32)pledgedSrcSize);
+    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+    params.fParams.contentSizeFlag = 1;
+    params.cParams = ZSTD_getCParamsFromCCtxParams(&params, pledgedSrcSize, 0);
+    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+}
+
+/*! ZSTD_initCStream_internal() :
+ *  Note : for lib/compress only. Used by zstdmt_compress.c.
+ *  Assumption 1 : params are valid
+ *  Assumption 2 : either dict, or cdict, is defined, not both */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+                    const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
+                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_initCStream_internal");
+    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
+
+    if (dict && dictSize >= 8) {
+        DEBUGLOG(4, "loading dictionary of size %u", (U32)dictSize);
+        if (zcs->staticSize) {   /* static CCtx : never uses malloc */
+            /* incompatible with internal cdict creation */
+            return ERROR(memory_allocation);
+        }
+        ZSTD_freeCDict(zcs->cdictLocal);
+        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+                                            ZSTD_dlm_byCopy, ZSTD_dct_auto,
+                                            params.cParams, zcs->customMem);
+        zcs->cdict = zcs->cdictLocal;
+        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
+    } else {
+        if (cdict) {
+            params.cParams = ZSTD_getCParamsFromCDict(cdict);  /* cParams are enforced from cdict; it includes windowLog */
+        }
+        ZSTD_freeCDict(zcs->cdictLocal);
+        zcs->cdictLocal = NULL;
+        zcs->cdict = cdict;
+    }
+
+    return ZSTD_resetCStream_internal(zcs, NULL, 0, ZSTD_dct_auto, zcs->cdict, params, pledgedSrcSize);
+}
+
+/* ZSTD_initCStream_usingCDict_advanced() :
+ * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
+size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
+                                            const ZSTD_CDict* cdict,
+                                            ZSTD_frameParameters fParams,
+                                            unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
+    if (!cdict) return ERROR(dictionary_wrong); /* cannot handle NULL cdict (does not know what to do) */
+    {   ZSTD_CCtx_params params = zcs->requestedParams;
+        params.cParams = ZSTD_getCParamsFromCDict(cdict);
+        params.fParams = fParams;
+        return ZSTD_initCStream_internal(zcs,
+                                NULL, 0, cdict,
+                                params, pledgedSrcSize);
+    }
+}
+
+/* note : cdict must outlive compression session */
+size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+{
+    ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksum */, 0 /* hideDictID */ };
+    DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
+    return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);  /* note : will check that cdict != NULL */
+}
+
+
+/* ZSTD_initCStream_advanced() :
+ * pledgedSrcSize must be exact.
+ * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
+ * dict is loaded with default parameters ZSTD_dm_auto and ZSTD_dlm_byCopy. */
+size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+                                 const void* dict, size_t dictSize,
+                                 ZSTD_parameters params, unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTD_initCStream_advanced: pledgedSrcSize=%u, flag=%u",
+                (U32)pledgedSrcSize, params.fParams.contentSizeFlag);
+    CHECK_F( ZSTD_checkCParams(params.cParams) );
+    if ((pledgedSrcSize==0) && (params.fParams.contentSizeFlag==0)) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;  /* for compatibility with older programs relying on this behavior. Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. This line will be removed in the future. */
+    {   ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+        return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL /*cdict*/, cctxParams, pledgedSrcSize);
+    }
+}
+
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+{
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+    ZSTD_CCtx_params const cctxParams =
+            ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+    return ZSTD_initCStream_internal(zcs, dict, dictSize, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
+{
+    U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;  /* temporary : 0 interpreted as "unknown" during transition period. Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. `0` will be interpreted as "empty" in the future */
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+    ZSTD_CCtx_params const cctxParams = ZSTD_assignParamsToCCtxParams(zcs->requestedParams, params);
+    return ZSTD_initCStream_internal(zcs, NULL, 0, NULL, cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+{
+    DEBUGLOG(4, "ZSTD_initCStream");
+    return ZSTD_initCStream_srcSize(zcs, compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+/*======   Compression   ======*/
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity,
+                           const void* src, size_t srcSize)
+{
+    size_t const length = MIN(dstCapacity, srcSize);
+    if (length) memcpy(dst, src, length);
+    return length;
+}
+
+/** ZSTD_compressStream_generic():
+ *  internal function for all *compressStream*() variants and *compress_generic()
+ *  non-static, because it can be called from zstdmt_compress.c
+ * @return : hint size for next input */
+size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+                                   ZSTD_outBuffer* output,
+                                   ZSTD_inBuffer* input,
+                                   ZSTD_EndDirective const flushMode)
+{
+    const char* const istart = (const char*)input->src;
+    const char* const iend = istart + input->size;
+    const char* ip = istart + input->pos;
+    char* const ostart = (char*)output->dst;
+    char* const oend = ostart + output->size;
+    char* op = ostart + output->pos;
+    U32 someMoreWork = 1;
+
+    /* check expectations */
+    DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (U32)flushMode);
+    assert(zcs->inBuff != NULL);
+    assert(zcs->inBuffSize > 0);
+    assert(zcs->outBuff !=  NULL);
+    assert(zcs->outBuffSize > 0);
+    assert(output->pos <= output->size);
+    assert(input->pos <= input->size);
+
+    while (someMoreWork) {
+        switch(zcs->streamStage)
+        {
+        case zcss_init:
+            /* call ZSTD_initCStream() first ! */
+            return ERROR(init_missing);
+
+        case zcss_load:
+            if ( (flushMode == ZSTD_e_end)
+              && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip))  /* enough dstCapacity */
+              && (zcs->inBuffPos == 0) ) {
+                /* shortcut to compression pass directly into output buffer */
+                size_t const cSize = ZSTD_compressEnd(zcs,
+                                                op, oend-op, ip, iend-ip);
+                DEBUGLOG(4, "ZSTD_compressEnd : %u", (U32)cSize);
+                if (ZSTD_isError(cSize)) return cSize;
+                ip = iend;
+                op += cSize;
+                zcs->frameEnded = 1;
+                ZSTD_startNewCompression(zcs);
+                someMoreWork = 0; break;
+            }
+            /* complete loading into inBuffer */
+            {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+                size_t const loaded = ZSTD_limitCopy(
+                                        zcs->inBuff + zcs->inBuffPos, toLoad,
+                                        ip, iend-ip);
+                zcs->inBuffPos += loaded;
+                ip += loaded;
+                if ( (flushMode == ZSTD_e_continue)
+                  && (zcs->inBuffPos < zcs->inBuffTarget) ) {
+                    /* not enough input to fill a full block : stop here */
+                    someMoreWork = 0; break;
+                }
+                if ( (flushMode == ZSTD_e_flush)
+                  && (zcs->inBuffPos == zcs->inToCompress) ) {
+                    /* empty */
+                    someMoreWork = 0; break;
+                }
+            }
+            /* compress current block (note : this stage cannot be stopped in the middle) */
+            DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
+            {   void* cDst;
+                size_t cSize;
+                size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
+                size_t oSize = oend-op;
+                unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
+                if (oSize >= ZSTD_compressBound(iSize))
+                    cDst = op;   /* compress into output buffer, to skip flush stage */
+                else
+                    cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+                cSize = lastBlock ?
+                        ZSTD_compressEnd(zcs, cDst, oSize,
+                                    zcs->inBuff + zcs->inToCompress, iSize) :
+                        ZSTD_compressContinue(zcs, cDst, oSize,
+                                    zcs->inBuff + zcs->inToCompress, iSize);
+                if (ZSTD_isError(cSize)) return cSize;
+                zcs->frameEnded = lastBlock;
+                /* prepare next block */
+                zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+                if (zcs->inBuffTarget > zcs->inBuffSize)
+                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
+                DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
+                         (U32)zcs->inBuffTarget, (U32)zcs->inBuffSize);
+                if (!lastBlock)
+                    assert(zcs->inBuffTarget <= zcs->inBuffSize);
+                zcs->inToCompress = zcs->inBuffPos;
+                if (cDst == op) {  /* no need to flush */
+                    op += cSize;
+                    if (zcs->frameEnded) {
+                        DEBUGLOG(5, "Frame completed directly in outBuffer");
+                        someMoreWork = 0;
+                        ZSTD_startNewCompression(zcs);
+                    }
+                    break;
+                }
+                zcs->outBuffContentSize = cSize;
+                zcs->outBuffFlushedSize = 0;
+                zcs->streamStage = zcss_flush; /* pass-through to flush stage */
+            }
+            /* fall-through */
+        case zcss_flush:
+            DEBUGLOG(5, "flush stage");
+            {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+                size_t const flushed = ZSTD_limitCopy(op, oend-op,
+                            zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+                DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
+                            (U32)toFlush, (U32)(oend-op), (U32)flushed);
+                op += flushed;
+                zcs->outBuffFlushedSize += flushed;
+                if (toFlush!=flushed) {
+                    /* flush not fully completed, presumably because dst is too small */
+                    assert(op==oend);
+                    someMoreWork = 0;
+                    break;
+                }
+                zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+                if (zcs->frameEnded) {
+                    DEBUGLOG(5, "Frame completed on flush");
+                    someMoreWork = 0;
+                    ZSTD_startNewCompression(zcs);
+                    break;
+                }
+                zcs->streamStage = zcss_load;
+                break;
+            }
+
+        default: /* impossible */
+            assert(0);
+        }
+    }
+
+    input->pos = ip - istart;
+    output->pos = op - ostart;
+    if (zcs->frameEnded) return 0;
+    {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
+        if (hintInSize==0) hintInSize = zcs->blockSize;
+        return hintInSize;
+    }
+}
+
+size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    /* check conditions */
+    if (output->pos > output->size) return ERROR(GENERIC);
+    if (input->pos  > input->size)  return ERROR(GENERIC);
+
+    return ZSTD_compressStream_generic(zcs, output, input, ZSTD_e_continue);
+}
+
+
+size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
+                              ZSTD_outBuffer* output,
+                              ZSTD_inBuffer* input,
+                              ZSTD_EndDirective endOp)
+{
+    DEBUGLOG(5, "ZSTD_compress_generic, endOp=%u ", (U32)endOp);
+    /* check conditions */
+    if (output->pos > output->size) return ERROR(GENERIC);
+    if (input->pos  > input->size)  return ERROR(GENERIC);
+    assert(cctx!=NULL);
+
+    /* transparent initialization stage */
+    if (cctx->streamStage == zcss_init) {
+        ZSTD_CCtx_params params = cctx->requestedParams;
+        ZSTD_prefixDict const prefixDict = cctx->prefixDict;
+        memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));  /* single usage */
+        assert(prefixDict.dict==NULL || cctx->cdict==NULL);   /* only one can be set */
+        DEBUGLOG(4, "ZSTD_compress_generic : transparent init stage");
+        if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1;  /* auto-fix pledgedSrcSize */
+        params.cParams = ZSTD_getCParamsFromCCtxParams(
+                &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/);
+
+#ifdef ZSTD_MULTITHREAD
+        if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
+            params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
+        }
+        if (params.nbWorkers > 0) {
+            /* mt context creation */
+            if (cctx->mtctx == NULL || (params.nbWorkers != ZSTDMT_getNbWorkers(cctx->mtctx))) {
+                DEBUGLOG(4, "ZSTD_compress_generic: creating new mtctx for nbWorkers=%u",
+                            params.nbWorkers);
+                if (cctx->mtctx != NULL)
+                    DEBUGLOG(4, "ZSTD_compress_generic: previous nbWorkers was %u",
+                                ZSTDMT_getNbWorkers(cctx->mtctx));
+                ZSTDMT_freeCCtx(cctx->mtctx);
+                cctx->mtctx = ZSTDMT_createCCtx_advanced(params.nbWorkers, cctx->customMem);
+                if (cctx->mtctx == NULL) return ERROR(memory_allocation);
+            }
+            /* mt compression */
+            DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
+            CHECK_F( ZSTDMT_initCStream_internal(
+                        cctx->mtctx,
+                        prefixDict.dict, prefixDict.dictSize, ZSTD_dct_rawContent,
+                        cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) );
+            cctx->streamStage = zcss_load;
+            cctx->appliedParams.nbWorkers = params.nbWorkers;
+        } else
+#endif
+        {   CHECK_F( ZSTD_resetCStream_internal(cctx,
+                            prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
+                            cctx->cdict,
+                            params, cctx->pledgedSrcSizePlusOne-1) );
+            assert(cctx->streamStage == zcss_load);
+            assert(cctx->appliedParams.nbWorkers == 0);
+    }   }
+
+    /* compression stage */
+#ifdef ZSTD_MULTITHREAD
+    if (cctx->appliedParams.nbWorkers > 0) {
+        if (cctx->cParamsChanged) {
+            ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
+            cctx->cParamsChanged = 0;
+        }
+        {   size_t const flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
+            if ( ZSTD_isError(flushMin)
+              || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
+                ZSTD_startNewCompression(cctx);
+            }
+            return flushMin;
+    }   }
+#endif
+    CHECK_F( ZSTD_compressStream_generic(cctx, output, input, endOp) );
+    DEBUGLOG(5, "completed ZSTD_compress_generic");
+    return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
+}
+
+size_t ZSTD_compress_generic_simpleArgs (
+                            ZSTD_CCtx* cctx,
+                            void* dst, size_t dstCapacity, size_t* dstPos,
+                      const void* src, size_t srcSize, size_t* srcPos,
+                            ZSTD_EndDirective endOp)
+{
+    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
+    /* ZSTD_compress_generic() will check validity of dstPos and srcPos */
+    size_t const cErr = ZSTD_compress_generic(cctx, &output, &input, endOp);
+    *dstPos = output.pos;
+    *srcPos = input.pos;
+    return cErr;
+}
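+
+/* Usage sketch for the simpleArgs wrapper above : one ZSTD_e_end call requests
+ * a complete frame; a return value of 0 means everything was flushed into dst.
+ * Illustrative only, not compiled; parameters are assumed to have been set
+ * beforehand (e.g. through ZSTD_CCtx_setParameter()). */
+#if 0
+static size_t example_genericOneShot(ZSTD_CCtx* cctx,
+                                     void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize)
+{
+    size_t dstPos = 0, srcPos = 0;
+    size_t const remaining = ZSTD_compress_generic_simpleArgs(cctx,
+                                    dst, dstCapacity, &dstPos,
+                                    src, srcSize, &srcPos, ZSTD_e_end);
+    if (ZSTD_isError(remaining)) return remaining;
+    return (remaining == 0) ? dstPos : ERROR(dstSize_tooSmall);   /* sketch */
+}
+#endif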
+
+
+/*======   Finalize   ======*/
+
+/*! ZSTD_flushStream() :
+ * @return : amount of data remaining to flush */
+size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+    ZSTD_inBuffer input = { NULL, 0, 0 };
+    if (output->pos > output->size) return ERROR(GENERIC);
+    CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_flush) );
+    return zcs->outBuffContentSize - zcs->outBuffFlushedSize;  /* remaining to flush */
+}
+
+
+size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+{
+    ZSTD_inBuffer input = { NULL, 0, 0 };
+    if (output->pos > output->size) return ERROR(GENERIC);
+    CHECK_F( ZSTD_compressStream_generic(zcs, output, &input, ZSTD_e_end) );
+    {   size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
+        size_t const checksumSize = zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4;
+        size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize + lastBlockSize + checksumSize;
+        DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (U32)toFlush);
+        return toFlush;
+    }
+}
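+
+/* Usage sketch for the streaming API above : feed all input through
+ * ZSTD_compressStream(), then call ZSTD_endStream() until it returns 0.
+ * Illustrative only, not compiled; a real caller would drain `output`
+ * between calls instead of requiring one large destination buffer. */
+#if 0
+static size_t example_streamWholeBuffer(ZSTD_CStream* zcs, int level,
+                                  const void* src, size_t srcSize,
+                                        void* dst, size_t dstCapacity)
+{
+    ZSTD_inBuffer  input  = { src, srcSize, 0 };
+    ZSTD_outBuffer output = { dst, dstCapacity, 0 };
+    size_t err = ZSTD_initCStream(zcs, level);
+    if (ZSTD_isError(err)) return err;
+    while (input.pos < input.size) {
+        err = ZSTD_compressStream(zcs, &output, &input);
+        if (ZSTD_isError(err)) return err;
+        if (output.pos == output.size) return ERROR(dstSize_tooSmall);
+    }
+    while ((err = ZSTD_endStream(zcs, &output)) != 0) {   /* non-zero : more to flush */
+        if (ZSTD_isError(err)) return err;
+        if (output.pos == output.size) return ERROR(dstSize_tooSmall);
+    }
+    return output.pos;   /* number of compressed bytes written */
+}
+#endif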
+
+
+/*-=====  Pre-defined compression levels  =====-*/
+
+#define ZSTD_MAX_CLEVEL     22
+int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+
+static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+{   /* "default" - guarantees a monotonically increasing memory budget */
+    /* W,  C,  H,  S,  L, TL, strat */
+    { 19, 12, 13,  1,  6,  1, ZSTD_fast    },  /* base for negative levels */
+    { 19, 13, 14,  1,  7,  1, ZSTD_fast    },  /* level  1 */
+    { 19, 15, 16,  1,  6,  1, ZSTD_fast    },  /* level  2 */
+    { 20, 16, 17,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
+    { 20, 17, 18,  1,  5,  8, ZSTD_dfast   },  /* level  4 */
+    { 20, 17, 18,  2,  5, 16, ZSTD_greedy  },  /* level  5 */
+    { 21, 17, 19,  2,  5, 16, ZSTD_lazy    },  /* level  6 */
+    { 21, 18, 19,  3,  5, 16, ZSTD_lazy    },  /* level  7 */
+    { 21, 18, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  8 */
+    { 21, 19, 20,  3,  5, 16, ZSTD_lazy2   },  /* level  9 */
+    { 21, 19, 21,  4,  5, 16, ZSTD_lazy2   },  /* level 10 */
+    { 22, 20, 22,  4,  5, 16, ZSTD_lazy2   },  /* level 11 */
+    { 22, 20, 22,  5,  5, 16, ZSTD_lazy2   },  /* level 12 */
+    { 22, 21, 22,  4,  5, 32, ZSTD_btlazy2 },  /* level 13 */
+    { 22, 21, 22,  5,  5, 32, ZSTD_btlazy2 },  /* level 14 */
+    { 22, 22, 22,  6,  5, 32, ZSTD_btlazy2 },  /* level 15 */
+    { 22, 21, 22,  4,  5, 48, ZSTD_btopt   },  /* level 16 */
+    { 23, 22, 22,  4,  4, 48, ZSTD_btopt   },  /* level 17 */
+    { 23, 22, 22,  5,  3, 64, ZSTD_btopt   },  /* level 18 */
+    { 23, 23, 22,  7,  3,128, ZSTD_btopt   },  /* level 19 */
+    { 25, 25, 23,  7,  3,128, ZSTD_btultra },  /* level 20 */
+    { 26, 26, 24,  7,  3,256, ZSTD_btultra },  /* level 21 */
+    { 27, 27, 25,  9,  3,512, ZSTD_btultra },  /* level 22 */
+},
+{   /* for srcSize <= 256 KB */
+    /* W,  C,  H,  S,  L,  T, strat */
+    { 18, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
+    { 18, 13, 14,  1,  6,  1, ZSTD_fast    },  /* level  1 */
+    { 18, 14, 13,  1,  5,  8, ZSTD_dfast   },  /* level  2 */
+    { 18, 16, 15,  1,  5,  8, ZSTD_dfast   },  /* level  3 */
+    { 18, 15, 17,  1,  5,  8, ZSTD_greedy  },  /* level  4.*/
+    { 18, 16, 17,  4,  5,  8, ZSTD_greedy  },  /* level  5.*/
+    { 18, 16, 17,  3,  5,  8, ZSTD_lazy    },  /* level  6.*/
+    { 18, 17, 17,  4,  4,  8, ZSTD_lazy    },  /* level  7 */
+    { 18, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
+    { 18, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
+    { 18, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
+    { 18, 18, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 11.*/
+    { 18, 18, 17,  5,  4,  8, ZSTD_btlazy2 },  /* level 12.*/
+    { 18, 19, 17,  7,  4,  8, ZSTD_btlazy2 },  /* level 13 */
+    { 18, 18, 18,  4,  4, 16, ZSTD_btopt   },  /* level 14.*/
+    { 18, 18, 18,  4,  3, 16, ZSTD_btopt   },  /* level 15.*/
+    { 18, 19, 18,  6,  3, 32, ZSTD_btopt   },  /* level 16.*/
+    { 18, 19, 18,  8,  3, 64, ZSTD_btopt   },  /* level 17.*/
+    { 18, 19, 18,  9,  3,128, ZSTD_btopt   },  /* level 18.*/
+    { 18, 19, 18, 10,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 18, 19, 18, 11,  3,512, ZSTD_btultra },  /* level 20.*/
+    { 18, 19, 18, 12,  3,512, ZSTD_btultra },  /* level 21.*/
+    { 18, 19, 18, 13,  3,512, ZSTD_btultra },  /* level 22.*/
+},
+{   /* for srcSize <= 128 KB */
+    /* W,  C,  H,  S,  L,  T, strat */
+    { 17, 12, 12,  1,  5,  1, ZSTD_fast    },  /* level  0 - not used */
+    { 17, 12, 13,  1,  6,  1, ZSTD_fast    },  /* level  1 */
+    { 17, 13, 16,  1,  5,  1, ZSTD_fast    },  /* level  2 */
+    { 17, 16, 16,  2,  5,  8, ZSTD_dfast   },  /* level  3 */
+    { 17, 13, 15,  3,  4,  8, ZSTD_greedy  },  /* level  4 */
+    { 17, 15, 17,  4,  4,  8, ZSTD_greedy  },  /* level  5 */
+    { 17, 16, 17,  3,  4,  8, ZSTD_lazy    },  /* level  6 */
+    { 17, 15, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  7 */
+    { 17, 17, 17,  4,  4,  8, ZSTD_lazy2   },  /* level  8 */
+    { 17, 17, 17,  5,  4,  8, ZSTD_lazy2   },  /* level  9 */
+    { 17, 17, 17,  6,  4,  8, ZSTD_lazy2   },  /* level 10 */
+    { 17, 17, 17,  7,  4,  8, ZSTD_lazy2   },  /* level 11 */
+    { 17, 17, 17,  8,  4,  8, ZSTD_lazy2   },  /* level 12 */
+    { 17, 18, 17,  6,  4,  8, ZSTD_btlazy2 },  /* level 13.*/
+    { 17, 17, 17,  7,  3,  8, ZSTD_btopt   },  /* level 14.*/
+    { 17, 17, 17,  7,  3, 16, ZSTD_btopt   },  /* level 15.*/
+    { 17, 18, 17,  7,  3, 32, ZSTD_btopt   },  /* level 16.*/
+    { 17, 18, 17,  7,  3, 64, ZSTD_btopt   },  /* level 17.*/
+    { 17, 18, 17,  7,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 17, 18, 17,  8,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 17, 18, 17,  9,  3,256, ZSTD_btultra },  /* level 20.*/
+    { 17, 18, 17, 10,  3,256, ZSTD_btultra },  /* level 21.*/
+    { 17, 18, 17, 11,  3,512, ZSTD_btultra },  /* level 22.*/
+},
+{   /* for srcSize <= 16 KB */
+    /* W,  C,  H,  S,  L,  T, strat */
+    { 14, 12, 13,  1,  5,  1, ZSTD_fast    },  /* base for negative levels */
+    { 14, 14, 14,  1,  6,  1, ZSTD_fast    },  /* level  1 */
+    { 14, 14, 14,  1,  4,  1, ZSTD_fast    },  /* level  2 */
+    { 14, 14, 14,  1,  4,  6, ZSTD_dfast   },  /* level  3.*/
+    { 14, 14, 14,  4,  4,  6, ZSTD_greedy  },  /* level  4.*/
+    { 14, 14, 14,  3,  4,  6, ZSTD_lazy    },  /* level  5.*/
+    { 14, 14, 14,  4,  4,  6, ZSTD_lazy2   },  /* level  6 */
+    { 14, 14, 14,  5,  4,  6, ZSTD_lazy2   },  /* level  7 */
+    { 14, 14, 14,  6,  4,  6, ZSTD_lazy2   },  /* level  8.*/
+    { 14, 15, 14,  6,  4,  6, ZSTD_btlazy2 },  /* level  9.*/
+    { 14, 15, 14,  3,  3,  6, ZSTD_btopt   },  /* level 10.*/
+    { 14, 15, 14,  6,  3,  8, ZSTD_btopt   },  /* level 11.*/
+    { 14, 15, 14,  6,  3, 16, ZSTD_btopt   },  /* level 12.*/
+    { 14, 15, 14,  6,  3, 24, ZSTD_btopt   },  /* level 13.*/
+    { 14, 15, 15,  6,  3, 48, ZSTD_btopt   },  /* level 14.*/
+    { 14, 15, 15,  6,  3, 64, ZSTD_btopt   },  /* level 15.*/
+    { 14, 15, 15,  6,  3, 96, ZSTD_btopt   },  /* level 16.*/
+    { 14, 15, 15,  6,  3,128, ZSTD_btopt   },  /* level 17.*/
+    { 14, 15, 15,  6,  3,256, ZSTD_btopt   },  /* level 18.*/
+    { 14, 15, 15,  7,  3,256, ZSTD_btopt   },  /* level 19.*/
+    { 14, 15, 15,  8,  3,256, ZSTD_btultra },  /* level 20.*/
+    { 14, 15, 15,  9,  3,256, ZSTD_btultra },  /* level 21.*/
+    { 14, 15, 15, 10,  3,256, ZSTD_btultra },  /* level 22.*/
+},
+};
+
+/*! ZSTD_getCParams() :
+*  @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
+*   Size values are optional, provide 0 if not known or unused */
+ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
+{
+    size_t const addedSize = srcSizeHint ? 0 : 500;
+    U64 const rSize = srcSizeHint+dictSize ? srcSizeHint+dictSize+addedSize : (U64)-1;
+    U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);   /* intentional underflow for srcSizeHint == 0 */
+    int row = compressionLevel;
+    DEBUGLOG(5, "ZSTD_getCParams (cLevel=%i)", compressionLevel);
+    if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT;   /* 0 == default */
+    if (compressionLevel < 0) row = 0;   /* entry 0 is baseline for fast mode */
+    if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
+    {   ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
+        if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel);   /* acceleration factor */
+        return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); }
+
+}
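+
+/* Usage sketch for the table lookup above : a 100 KB source hint at level 3
+ * selects the "srcSize <= 256 KB" and "<= 128 KB" rows (tableID 2) before the
+ * result is refined by ZSTD_adjustCParams_internal(). Illustrative only, not compiled. */
+#if 0
+static ZSTD_compressionParameters example_queryCParams(void)
+{
+    /* srcSizeHint and dictSize are optional : pass 0 when unknown */
+    return ZSTD_getCParams(3 /* level */, 100 KB /* srcSizeHint */, 0 /* dictSize */);
+}
+#endif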
+
+/*! ZSTD_getParams() :
+*   same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
+*   All fields of `ZSTD_frameParameters` are set to default (0) */
+ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
+    ZSTD_parameters params;
+    ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSizeHint, dictSize);
+    DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
+    memset(&params, 0, sizeof(params));
+    params.cParams = cParams;
+    params.fParams.contentSizeFlag = 1;
+    return params;
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_compress_internal.h b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
new file mode 100644
index 0000000..81f12ca
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_compress_internal.h
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/* This header contains definitions
+ * that shall **only** be used by modules within lib/compress.
+ */
+
+#ifndef ZSTD_COMPRESS_H
+#define ZSTD_COMPRESS_H
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "zstd_internal.h"
+#ifdef ZSTD_MULTITHREAD
+#  include "zstdmt_compress.h"
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+*  Constants
+***************************************/
+#define kSearchStrength      8
+#define HASH_READ_SIZE       8
+#define ZSTD_DUBT_UNSORTED_MARK 1   /* For btlazy2 strategy, index 1 now means "unsorted".
+                                       It could be confused with a real successor at index "1", if sorted as larger than its predecessor.
+                                       It's not a big deal though : the candidate will just be sorted again.
+                                       Additionally, candidate position 1 will be lost.
+                                       But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
+                                       The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy */
+
+
+/*-*************************************
+*  Context memory management
+***************************************/
+typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
+
+typedef struct ZSTD_prefixDict_s {
+    const void* dict;
+    size_t dictSize;
+    ZSTD_dictContentType_e dictContentType;
+} ZSTD_prefixDict;
+
+typedef struct {
+    U32 hufCTable[HUF_CTABLE_SIZE_U32(255)];
+    FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+    FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+    HUF_repeat hufCTable_repeatMode;
+    FSE_repeat offcode_repeatMode;
+    FSE_repeat matchlength_repeatMode;
+    FSE_repeat litlength_repeatMode;
+} ZSTD_entropyCTables_t;
+
+typedef struct {
+    U32 off;
+    U32 len;
+} ZSTD_match_t;
+
+typedef struct {
+    int price;
+    U32 off;
+    U32 mlen;
+    U32 litlen;
+    U32 rep[ZSTD_REP_NUM];
+} ZSTD_optimal_t;
+
+typedef struct {
+    /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
+    U32* litFreq;               /* table of literals statistics, of size 256 */
+    U32* litLengthFreq;         /* table of litLength statistics, of size (MaxLL+1) */
+    U32* matchLengthFreq;       /* table of matchLength statistics, of size (MaxML+1) */
+    U32* offCodeFreq;           /* table of offCode statistics, of size (MaxOff+1) */
+    ZSTD_match_t* matchTable;   /* list of found matches, of size ZSTD_OPT_NUM+1 */
+    ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */
+
+    U32  litSum;                 /* nb of literals */
+    U32  litLengthSum;           /* nb of litLength codes */
+    U32  matchLengthSum;         /* nb of matchLength codes */
+    U32  offCodeSum;             /* nb of offset codes */
+    /* begin updated by ZSTD_setLog2Prices */
+    U32  log2litSum;             /* pow2 to compare log2(litfreq) to */
+    U32  log2litLengthSum;       /* pow2 to compare log2(llfreq) to */
+    U32  log2matchLengthSum;     /* pow2 to compare log2(mlfreq) to */
+    U32  log2offCodeSum;         /* pow2 to compare log2(offreq) to */
+    /* end : updated by ZSTD_setLog2Prices */
+    U32  staticPrices;           /* prices follow a pre-defined cost structure, statistics are irrelevant */
+} optState_t;
+
+typedef struct {
+  ZSTD_entropyCTables_t entropy;
+  U32 rep[ZSTD_REP_NUM];
+} ZSTD_compressedBlockState_t;
+
+typedef struct {
+    BYTE const* nextSrc;    /* next block here to continue on current prefix */
+    BYTE const* base;       /* All regular indexes relative to this position */
+    BYTE const* dictBase;   /* extDict indexes relative to this position */
+    U32 dictLimit;          /* below that point, need extDict */
+    U32 lowLimit;           /* below that point, no more data */
+} ZSTD_window_t;
+
+typedef struct {
+    ZSTD_window_t window;      /* State for window round buffer management */
+    U32 loadedDictEnd;         /* index of end of dictionary */
+    U32 nextToUpdate;          /* index from which to continue table update */
+    U32 nextToUpdate3;         /* index from which to continue the 3-byte hash table update */
+    U32 hashLog3;              /* dispatch table : larger == faster, more memory */
+    U32* hashTable;
+    U32* hashTable3;
+    U32* chainTable;
+    optState_t opt;         /* optimal parser state */
+} ZSTD_matchState_t;
+
+typedef struct {
+    ZSTD_compressedBlockState_t* prevCBlock;
+    ZSTD_compressedBlockState_t* nextCBlock;
+    ZSTD_matchState_t matchState;
+} ZSTD_blockState_t;
+
+typedef struct {
+    U32 offset;
+    U32 checksum;
+} ldmEntry_t;
+
+typedef struct {
+    ZSTD_window_t window;   /* State for the window round buffer management */
+    ldmEntry_t* hashTable;
+    BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
+    U64 hashPower;          /* Used to compute the rolling hash.
+                             * Depends on ldmParams.minMatchLength */
+} ldmState_t;
+
+typedef struct {
+    U32 enableLdm;          /* 1 to enable long distance matching */
+    U32 hashLog;            /* Log size of hashTable */
+    U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
+    U32 minMatchLength;     /* Minimum match length */
+    U32 hashEveryLog;       /* Log number of entries to skip */
+    U32 windowLog;          /* Window log for the LDM */
+} ldmParams_t;
+
+typedef struct {
+    U32 offset;
+    U32 litLength;
+    U32 matchLength;
+} rawSeq;
+
+typedef struct {
+  rawSeq* seq;     /* The start of the sequences */
+  size_t pos;      /* The position where reading stopped. <= size. */
+  size_t size;     /* The number of sequences. <= capacity. */
+  size_t capacity; /* The capacity of the `seq` pointer */
+} rawSeqStore_t;
+
+struct ZSTD_CCtx_params_s {
+    ZSTD_format_e format;
+    ZSTD_compressionParameters cParams;
+    ZSTD_frameParameters fParams;
+
+    int compressionLevel;
+    int disableLiteralCompression;
+    int forceWindow;           /* force back-references to respect limit of
+                                * 1<<wLog, even for dictionary */
+
+    /* Multithreading: used to pass parameters to mtctx */
+    unsigned nbWorkers;
+    unsigned jobSize;
+    unsigned overlapSizeLog;
+
+    /* Long distance matching parameters */
+    ldmParams_t ldmParams;
+
+    /* Internal use, for createCCtxParams() and freeCCtxParams() only */
+    ZSTD_customMem customMem;
+};  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
+
+struct ZSTD_CCtx_s {
+    ZSTD_compressionStage_e stage;
+    int cParamsChanged;                  /* == 1 if cParams (except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available), then reset to 0. */
+    int bmi2;                            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+    ZSTD_CCtx_params requestedParams;
+    ZSTD_CCtx_params appliedParams;
+    U32   dictID;
+    void* workSpace;
+    size_t workSpaceSize;
+    size_t blockSize;
+    unsigned long long pledgedSrcSizePlusOne;  /* this way, 0 (default) == unknown */
+    unsigned long long consumedSrcSize;
+    unsigned long long producedCSize;
+    XXH64_state_t xxhState;
+    ZSTD_customMem customMem;
+    size_t staticSize;
+
+    seqStore_t seqStore;      /* sequences storage ptrs */
+    ldmState_t ldmState;      /* long distance matching state */
+    rawSeq* ldmSequences;     /* Storage for the ldm output sequences */
+    size_t maxNbLdmSequences;
+    rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
+    ZSTD_blockState_t blockState;
+    U32* entropyWorkspace;  /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+
+    /* streaming */
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inToCompress;
+    size_t inBuffPos;
+    size_t inBuffTarget;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outBuffContentSize;
+    size_t outBuffFlushedSize;
+    ZSTD_cStreamStage streamStage;
+    U32    frameEnded;
+
+    /* Dictionary */
+    ZSTD_CDict* cdictLocal;
+    const ZSTD_CDict* cdict;
+    ZSTD_prefixDict prefixDict;   /* single-usage dictionary */
+
+    /* Multi-threading */
+#ifdef ZSTD_MULTITHREAD
+    ZSTDMT_CCtx* mtctx;
+#endif
+};
+
+
+typedef size_t (*ZSTD_blockCompressor) (
+        ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
+
+
+MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
+{
+    static const BYTE LL_Code[64] = {  0,  1,  2,  3,  4,  5,  6,  7,
+                                       8,  9, 10, 11, 12, 13, 14, 15,
+                                      16, 16, 17, 17, 18, 18, 19, 19,
+                                      20, 20, 20, 20, 21, 21, 21, 21,
+                                      22, 22, 22, 22, 22, 22, 22, 22,
+                                      23, 23, 23, 23, 23, 23, 23, 23,
+                                      24, 24, 24, 24, 24, 24, 24, 24,
+                                      24, 24, 24, 24, 24, 24, 24, 24 };
+    static const U32 LL_deltaCode = 19;
+    return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
+}
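+/* Worked example : litLength = 100 is above 63, so its code is
+ * ZSTD_highbit32(100) + LL_deltaCode = 6 + 19 = 25,
+ * while litLength = 18 falls inside the table and maps to LL_Code[18] = 17. */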
+
+/* ZSTD_MLcode() :
+ * note : mlBase = matchLength - MINMATCH;
+ *        because that is the format in which it is stored in seqStore->sequences */
+MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
+{
+    static const BYTE ML_Code[128] = { 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+                                      16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+                                      32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+                                      38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+                                      40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+                                      41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+                                      42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+    static const U32 ML_deltaCode = 36;
+    return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
+}
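+/* Worked example (assuming MINMATCH == 3) : a match of length 40 gives
+ * mlBase = 37, which maps to ML_Code[37] = 34; a match with mlBase = 200
+ * is above 127, so its code is ZSTD_highbit32(200) + ML_deltaCode = 7 + 36 = 43. */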
+
+/*! ZSTD_storeSeq() :
+ *  Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+ *  `offsetCode` : distance to match + 3 (values 1-3 are repCodes).
+ *  `mlBase` : matchLength - MINMATCH
+*/
+MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t mlBase)
+{
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG >= 6)
+    static const BYTE* g_start = NULL;
+    if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
+    {   U32 const pos = (U32)((const BYTE*)literals - g_start);
+        DEBUGLOG(6, "Cpos%7u :%3u literals, match%3u bytes at dist.code%7u",
+               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offsetCode);
+    }
+#endif
+    /* copy Literals */
+    assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + 128 KB);
+    ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+    seqStorePtr->lit += litLength;
+
+    /* literal Length */
+    if (litLength>0xFFFF) {
+        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
+        seqStorePtr->longLengthID = 1;
+        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    }
+    seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+    /* match offset */
+    seqStorePtr->sequences[0].offset = offsetCode + 1;
+
+    /* match Length */
+    if (mlBase>0xFFFF) {
+        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
+        seqStorePtr->longLengthID = 2;
+        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    }
+    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
+
+    seqStorePtr->sequences++;
+}
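+/* Usage sketch (illustrative only, not a call site in this file) : a block
+ * compressor that gathered `litLength` literals starting at `anchor`, followed
+ * by a match of `matchLength` bytes, would record the pair as :
+ *     ZSTD_storeSeq(seqStore, litLength, anchor, offsetCode, matchLength - MINMATCH);
+ * with `offsetCode` following the repCode convention documented above. */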
+
+
+/*-*************************************
+*  Match length counter
+***************************************/
+static unsigned ZSTD_NbCommonBytes (size_t val)
+{
+    if (MEM_isLittleEndian()) {
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanForward64( &r, (U64)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 4)
+            return (__builtin_ctzll((U64)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+                                                     0, 3, 1, 3, 1, 4, 2, 7,
+                                                     0, 2, 3, 6, 1, 5, 3, 5,
+                                                     1, 3, 4, 4, 2, 5, 6, 7,
+                                                     7, 0, 1, 2, 3, 3, 4, 6,
+                                                     2, 6, 5, 5, 3, 4, 5, 6,
+                                                     7, 1, 2, 4, 6, 4, 4, 5,
+                                                     7, 2, 6, 5, 7, 6, 7, 7 };
+            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r=0;
+            _BitScanForward( &r, (U32)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_ctz((U32)val) >> 3);
+#       else
+            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+                                                     3, 2, 2, 1, 3, 2, 0, 1,
+                                                     3, 3, 1, 2, 2, 2, 2, 0,
+                                                     3, 1, 2, 0, 1, 0, 1, 1 };
+            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+#       endif
+        }
+    } else {  /* Big Endian CPU */
+        if (MEM_64bits()) {
+#       if defined(_MSC_VER) && defined(_WIN64)
+            unsigned long r = 0;
+            _BitScanReverse64( &r, val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 4)
+            return (__builtin_clzll(val) >> 3);
+#       else
+            unsigned r;
+            const unsigned n32 = sizeof(size_t)*4;   /* calculated this way because the compiler complains in 32-bit mode */
+            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+        } else { /* 32 bits */
+#       if defined(_MSC_VER)
+            unsigned long r = 0;
+            _BitScanReverse( &r, (unsigned long)val );
+            return (unsigned)(r>>3);
+#       elif defined(__GNUC__) && (__GNUC__ >= 3)
+            return (__builtin_clz((U32)val) >> 3);
+#       else
+            unsigned r;
+            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+            r += (!val);
+            return r;
+#       endif
+    }   }
+}
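+/* Worked example : on a little-endian CPU, if two 8-byte words first differ in
+ * their byte at offset 2, the XOR `val` has its lowest set bit in bits 16..23,
+ * so __builtin_ctzll(val) lands in [16,23] and (ctz >> 3) == 2 common bytes. */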
+
+
+MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+{
+    const BYTE* const pStart = pIn;
+    const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+    if (pIn < pInLoopLimit) {
+        { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+          if (diff) return ZSTD_NbCommonBytes(diff); }
+        pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
+        while (pIn < pInLoopLimit) {
+            size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+            if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+            pIn += ZSTD_NbCommonBytes(diff);
+            return (size_t)(pIn - pStart);
+    }   }
+    if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+    if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+    return (size_t)(pIn - pStart);
+}
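+/* Example : with pIn = "abcdefgh" and pMatch = "abcdWXYZ" (and pInLimit far
+ * enough away), the XOR of the first size_t words is non-zero,
+ * ZSTD_NbCommonBytes() locates the first differing byte at offset 4,
+ * and ZSTD_count() returns 4. */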
+
+/** ZSTD_count_2segments() :
+ *  can count match length with `ip` & `match` in 2 different segments.
+ *  convention : on reaching mEnd, the match count continues, starting from iStart
+ */
+MEM_STATIC size_t
+ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
+                     const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+{
+    const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+    size_t const matchLength = ZSTD_count(ip, match, vEnd);
+    if (match + matchLength != mEnd) return matchLength;
+    return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+}
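+/* Example : if `match` sits 2 bytes before mEnd and those 2 bytes match `ip`,
+ * counting continues from iStart, so the function can report
+ * 2 + ZSTD_count(ip+2, iStart, iEnd) matching bytes in total. */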
+
+
+/*-*************************************
+ *  Hashes
+ ***************************************/
+static const U32 prime3bytes = 506832829U;
+static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+static const U32 prime4bytes = 2654435761U;
+static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+static const U64 prime5bytes = 889523592379ULL;
+static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+static const U64 prime6bytes = 227718039650203ULL;
+static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+static const U64 prime7bytes = 58295818150454627ULL;
+static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+{
+    switch(mls)
+    {
+    default:
+    case 4: return ZSTD_hash4Ptr(p, hBits);
+    case 5: return ZSTD_hash5Ptr(p, hBits);
+    case 6: return ZSTD_hash6Ptr(p, hBits);
+    case 7: return ZSTD_hash7Ptr(p, hBits);
+    case 8: return ZSTD_hash8Ptr(p, hBits);
+    }
+}
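+/* Usage sketch (illustrative, assuming a hash table of size 1<<17) :
+ *     size_t const h = ZSTD_hashPtr(ip, 17, 5);   // hash the 5 bytes at ip into a 17-bit bucket
+ *     hashTable[h] = (U32)(ip - base);            // remember the current position's index
+ * Actual match finders (e.g. in zstd_fast.c and zstd_lazy.c) follow this pattern. */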
+
+/*-*************************************
+*  Round buffer management
+***************************************/
+/* Max current allowed */
+#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
+/* Maximum chunk size before overflow correction needs to be called again */
+#define ZSTD_CHUNKSIZE_MAX                                                     \
+    ( ((U32)-1)                  /* Maximum ending current index */            \
+    - ZSTD_CURRENT_MAX)          /* Maximum beginning lowLimit */
+
+/**
+ * ZSTD_window_clear():
+ * Clears the window containing the history by simply setting it to empty.
+ */
+MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
+{
+    size_t const endT = (size_t)(window->nextSrc - window->base);
+    U32 const end = (U32)endT;
+
+    window->lowLimit = end;
+    window->dictLimit = end;
+}
+
+/**
+ * ZSTD_window_hasExtDict():
+ * Returns non-zero if the window has a non-empty extDict.
+ */
+MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
+{
+    return window.lowLimit < window.dictLimit;
+}
+
+/**
+ * ZSTD_window_needOverflowCorrection():
+ * Returns non-zero if the indices are getting too large and need overflow
+ * protection.
+ */
+MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+                                                  void const* srcEnd)
+{
+    U32 const current = (U32)((BYTE const*)srcEnd - window.base);
+    return current > ZSTD_CURRENT_MAX;
+}
+
+/**
+ * ZSTD_window_correctOverflow():
+ * Reduces the indices to protect from index overflow.
+ * Returns the correction made to the indices, which must be applied to every
+ * stored index.
+ *
+ * The least significant cycleLog bits of the indices must remain the same,
+ * which may be 0. Every index up to maxDist in the past must be valid.
+ * NOTE: (maxDist & cycleMask) must be zero.
+ */
+MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
+                                           U32 maxDist, void const* src)
+{
+    /* preemptive overflow correction:
+     * 1. correction is large enough:
+     *    lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
+     *    1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
+     *
+     *    current - newCurrent
+     *    > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
+     *    > (3<<29) - (1<<chainLog)
+     *    > (3<<29) - (1<<30)             (NOTE: chainLog <= 30)
+     *    > 1<<29
+     *
+     * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
+     *    After correction, current is less than (1<<chainLog + 1<<windowLog).
+     *    In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
+     *    In 32-bit mode we are safe, because (chainLog <= 29), so
+     *    ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
+     * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
+     *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
+     */
+    U32 const cycleMask = (1U << cycleLog) - 1;
+    U32 const current = (U32)((BYTE const*)src - window->base);
+    U32 const newCurrent = (current & cycleMask) + maxDist;
+    U32 const correction = current - newCurrent;
+    assert((maxDist & cycleMask) == 0);
+    assert(current > newCurrent);
+    /* Loose bound, should be around 1<<29 (see above) */
+    assert(correction > 1<<28);
+
+    window->base += correction;
+    window->dictBase += correction;
+    window->lowLimit -= correction;
+    window->dictLimit -= correction;
+
+    DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
+             window->lowLimit);
+    return correction;
+}
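+/* Worked numeric example (assuming cycleLog = 20 and maxDist = 1<<20) :
+ * current = 0x60000123 gives newCurrent = (0x60000123 & 0xFFFFF) + (1<<20)
+ * = 0x123 + 0x100000 = 0x100123, hence correction = 0x5FF00000.
+ * Every stored index is then reduced by `correction`, preserving its low 20 bits. */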
+
+/**
+ * ZSTD_window_enforceMaxDist():
+ * Updates lowLimit so that:
+ *    (srcEnd - base) - lowLimit == maxDist + loadedDictEnd
+ * This allows a simple check that index >= lowLimit to see if index is valid.
+ * This must be called before a block compression call, with srcEnd as the block
+ * source end.
+ * If loadedDictEndPtr is not NULL, we set it to zero once we update lowLimit.
+ * This is because dictionaries are allowed to be referenced as long as the last
+ * byte of the dictionary is in the window, but once they are out of range,
+ * they cannot be referenced. If loadedDictEndPtr is NULL, we use
+ * loadedDictEnd == 0.
+ */
+MEM_STATIC void ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
+                                           void const* srcEnd, U32 maxDist,
+                                           U32* loadedDictEndPtr)
+{
+    U32 const current = (U32)((BYTE const*)srcEnd - window->base);
+    U32 loadedDictEnd = loadedDictEndPtr != NULL ? *loadedDictEndPtr : 0;
+    if (current > maxDist + loadedDictEnd) {
+        U32 const newLowLimit = current - maxDist;
+        if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
+        if (window->dictLimit < window->lowLimit) {
+            DEBUGLOG(5, "Update dictLimit from %u to %u", window->dictLimit,
+                     window->lowLimit);
+            window->dictLimit = window->lowLimit;
+        }
+        if (loadedDictEndPtr)
+            *loadedDictEndPtr = 0;
+    }
+}
+
+/**
+ * ZSTD_window_update():
+ * Updates the window by appending [src, src + srcSize) to the window.
+ * If it is not contiguous, the current prefix becomes the extDict, and we
+ * forget about the old extDict. Handles overlap of the prefix and extDict.
+ * Returns non-zero if the segment is contiguous.
+ */
+MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
+                                  void const* src, size_t srcSize)
+{
+    BYTE const* const ip = (BYTE const*)src;
+    U32 contiguous = 1;
+    /* Check if blocks follow each other */
+    if (src != window->nextSrc) {
+        /* not contiguous */
+        size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
+        DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u",
+                 window->dictLimit);
+        window->lowLimit = window->dictLimit;
+        assert(distanceFromBase == (size_t)(U32)distanceFromBase);  /* should never overflow */
+        window->dictLimit = (U32)distanceFromBase;
+        window->dictBase = window->base;
+        window->base = ip - distanceFromBase;
+        // ms->nextToUpdate = window->dictLimit;
+        if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit;   /* too small extDict */
+        contiguous = 0;
+    }
+    window->nextSrc = ip + srcSize;
+    /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+    if ( (ip+srcSize > window->dictBase + window->lowLimit)
+       & (ip < window->dictBase + window->dictLimit)) {
+        ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
+        U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
+        window->lowLimit = lowLimitMax;
+    }
+    return contiguous;
+}
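+/* Example (assuming the first segment started at index 0) : after loading a
+ * first segment of 1000 bytes, an update with a non-adjacent buffer B sets
+ * lowLimit = dictLimit, dictLimit = 1000, dictBase = base, and rebases `base`
+ * to B - 1000, so the first byte of B gets index 1000 : indexes keep growing
+ * monotonically across segments. */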
+
+#if defined (__cplusplus)
+}
+#endif
+
+
+/* ==============================================================
+ * Private declarations
+ * These prototypes shall only be called from within lib/compress
+ * ============================================================== */
+
+/* ZSTD_getCParamsFromCCtxParams() :
+ * cParams are built depending on compressionLevel, src size hints, 
+ * LDM and manually set compression parameters.
+ */
+ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
+        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
+
+/*! ZSTD_initCStream_internal() :
+ *  Private use only. Init streaming operation.
+ *  expects params to be valid.
+ *  must receive dict, or cdict, or none, but not both.
+ *  @return : 0, or an error code */
+size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
+                     const void* dict, size_t dictSize,
+                     const ZSTD_CDict* cdict,
+                     ZSTD_CCtx_params  params, unsigned long long pledgedSrcSize);
+
+/*! ZSTD_compressStream_generic() :
+ *  Private use only. To be called from zstdmt_compress.c in single-thread mode. */
+size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+                                   ZSTD_outBuffer* output,
+                                   ZSTD_inBuffer* input,
+                                   ZSTD_EndDirective const flushMode);
+
+/*! ZSTD_getCParamsFromCDict() :
+ *  as the name implies */
+ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
+
+/* ZSTD_compressBegin_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
+                                    const void* dict, size_t dictSize,
+                                    ZSTD_dictContentType_e dictContentType,
+                                    const ZSTD_CDict* cdict,
+                                    ZSTD_CCtx_params params,
+                                    unsigned long long pledgedSrcSize);
+
+/* ZSTD_compress_advanced_internal() :
+ * Private use only. To be called from zstdmt_compress.c. */
+size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
+                                       void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize,
+                                 const void* dict,size_t dictSize,
+                                 ZSTD_CCtx_params params);
+
+
+/* ZSTD_writeLastEmptyBlock() :
+ * output an empty Block with end-of-frame mark to complete a frame
+ * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
+ *           or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
+ */
+size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
+
+
+/* ZSTD_referenceExternalSequences() :
+ * Must be called before starting a compression operation.
+ * seqs must parse a prefix of the source.
+ * This cannot be used when long range matching is enabled.
+ * Zstd will use these sequences, and pass the literals to a secondary block
+ * compressor.
+ * @return : An error code on failure.
+ * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
+ * access and data corruption.
+ */
+size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
+
+
+#endif /* ZSTD_COMPRESS_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_decompress.c b/vendor/github.com/DataDog/zstd/zstd_decompress.c
new file mode 100644
index 0000000..3ec6a1c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_decompress.c
@@ -0,0 +1,3003 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Selects how the default decompression function ZSTD_decompress() allocates its context :
+ * on the stack (0), or on the heap (1, default; requires malloc()).
+ * Note that functions with an explicit context, such as ZSTD_decompressDCtx(), are unaffected.
+ */
+#ifndef ZSTD_HEAPMODE
+#  define ZSTD_HEAPMODE 1
+#endif
+
+/*!
+*  LEGACY_SUPPORT :
+*  if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+#  define ZSTD_LEGACY_SUPPORT 0
+#endif
+
+/*!
+ *  MAXWINDOWSIZE_DEFAULT :
+ *  maximum window size accepted by DStream __by default__.
+ *  Frames requiring more memory will be rejected.
+ *  It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
+ */
+#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
+#  define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
+#endif
+
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include <string.h>      /* memcpy, memmove, memset */
+#include "cpu.h"
+#include "mem.h"         /* low level memory routines */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#include "zstd_internal.h"
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+#  include "zstd_legacy.h"
+#endif
+
+
+/*-*************************************
+*  Errors
+***************************************/
+#define ZSTD_isError ERR_isError   /* for inlining */
+#define FSE_isError  ERR_isError
+#define HUF_isError  ERR_isError
+
+
+/*_*******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+*   Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+               ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
+               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
+
+typedef enum { zdss_init=0, zdss_loadHeader,
+               zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
+
+
+typedef struct {
+    U32 fastMode;
+    U32 tableLog;
+} ZSTD_seqSymbol_header;
+
+typedef struct {
+    U16  nextState;
+    BYTE nbAdditionalBits;
+    BYTE nbBits;
+    U32  baseValue;
+} ZSTD_seqSymbol;
+
+#define SEQSYMBOL_TABLE_SIZE(log)   (1 + (1 << (log)))
+
+typedef struct {
+    ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];
+    ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];
+    ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];
+    HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)];  /* can accommodate HUF_decompress4X */
+    U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
+    U32 rep[ZSTD_REP_NUM];
+} ZSTD_entropyDTables_t;
+
+struct ZSTD_DCtx_s
+{
+    const ZSTD_seqSymbol* LLTptr;
+    const ZSTD_seqSymbol* MLTptr;
+    const ZSTD_seqSymbol* OFTptr;
+    const HUF_DTable* HUFptr;
+    ZSTD_entropyDTables_t entropy;
+    const void* previousDstEnd;   /* detect continuity */
+    const void* base;             /* start of current segment */
+    const void* vBase;            /* virtual start of previous segment if it was just before current one */
+    const void* dictEnd;          /* end of previous segment */
+    size_t expected;
+    ZSTD_frameHeader fParams;
+    U64 decodedSize;
+    blockType_e bType;            /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
+    ZSTD_dStage stage;
+    U32 litEntropy;
+    U32 fseEntropy;
+    XXH64_state_t xxhState;
+    size_t headerSize;
+    U32 dictID;
+    ZSTD_format_e format;
+    const BYTE* litPtr;
+    ZSTD_customMem customMem;
+    size_t litSize;
+    size_t rleSize;
+    size_t staticSize;
+    int bmi2;                     /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
+
+    /* streaming */
+    ZSTD_DDict* ddictLocal;
+    const ZSTD_DDict* ddict;
+    ZSTD_dStreamStage streamStage;
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    size_t maxWindowSize;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t lhSize;
+    void* legacyContext;
+    U32 previousLegacyVersion;
+    U32 legacyVersion;
+    U32 hostageByte;
+
+    /* workspace */
+    BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+    BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
+};  /* typedef'd to ZSTD_DCtx within "zstd.h" */
+
+size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
+{
+    if (dctx==NULL) return 0;   /* support sizeof NULL */
+    return sizeof(*dctx)
+           + ZSTD_sizeof_DDict(dctx->ddictLocal)
+           + dctx->inBuffSize + dctx->outBuffSize;
+}
+
+size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
+
+
+static size_t ZSTD_startingInputLength(ZSTD_format_e format)
+{
+    size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
+                    ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize :
+                    ZSTD_frameHeaderSize_prefix;
+    ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
+    /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
+    assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
+    return startingInputLength;
+}
+
+static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
+{
+    dctx->format = ZSTD_f_zstd1;  /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
+    dctx->staticSize  = 0;
+    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+    dctx->ddict       = NULL;
+    dctx->ddictLocal  = NULL;
+    dctx->inBuff      = NULL;
+    dctx->inBuffSize  = 0;
+    dctx->outBuffSize = 0;
+    dctx->streamStage = zdss_init;
+    dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
+}
+
+ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
+{
+    ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
+
+    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
+    if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL;  /* minimum size */
+
+    ZSTD_initDCtx_internal(dctx);
+    dctx->staticSize = workspaceSize;
+    dctx->inBuff = (char*)(dctx+1);
+    return dctx;
+}
+
+ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
+{
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+    {   ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
+        if (!dctx) return NULL;
+        dctx->customMem = customMem;
+        dctx->legacyContext = NULL;
+        dctx->previousLegacyVersion = 0;
+        ZSTD_initDCtx_internal(dctx);
+        return dctx;
+    }
+}
+
+ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+    DEBUGLOG(3, "ZSTD_createDCtx");
+    return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
+}
+
+size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+    if (dctx==NULL) return 0;   /* support free on NULL */
+    if (dctx->staticSize) return ERROR(memory_allocation);   /* not compatible with static DCtx */
+    {   ZSTD_customMem const cMem = dctx->customMem;
+        ZSTD_freeDDict(dctx->ddictLocal);
+        dctx->ddictLocal = NULL;
+        ZSTD_free(dctx->inBuff, cMem);
+        dctx->inBuff = NULL;
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+        if (dctx->legacyContext)
+            ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
+#endif
+        ZSTD_free(dctx, cMem);
+        return 0;
+    }
+}
+
+/* no longer useful */
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
+{
+    size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
+    memcpy(dstDCtx, srcDCtx, toCopy);  /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+ *   Frame header decoding
+ ***************************************************************/
+
+/*! ZSTD_isFrame() :
+ *  Tells if the content of `buffer` starts with a valid Frame Identifier.
+ *  Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
+ *  Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
+ *  Note 3 : Skippable Frame Identifiers are considered valid. */
+unsigned ZSTD_isFrame(const void* buffer, size_t size)
+{
+    if (size < ZSTD_frameIdSize) return 0;
+    {   U32 const magic = MEM_readLE32(buffer);
+        if (magic == ZSTD_MAGICNUMBER) return 1;
+        if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
+    }
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+    if (ZSTD_isLegacy(buffer, size)) return 1;
+#endif
+    return 0;
+}
+
+/** ZSTD_frameHeaderSize_internal() :
+ *  srcSize must be large enough to reach header size fields.
+ *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
+ * @return : size of the Frame Header
+ *           or an error code, which can be tested with ZSTD_isError() */
+static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
+{
+    size_t const minInputSize = ZSTD_startingInputLength(format);
+    if (srcSize < minInputSize) return ERROR(srcSize_wrong);
+
+    {   BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
+        U32 const dictID= fhd & 3;
+        U32 const singleSegment = (fhd >> 5) & 1;
+        U32 const fcsId = fhd >> 6;
+        return minInputSize + !singleSegment
+             + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
+             + (singleSegment && !fcsId);
+    }
+}
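+/* Worked example : fhd = 0x84 gives dictID = 0 (no dictionary-ID field),
+ * singleSegment = 0 (a window descriptor byte follows) and fcsId = 2
+ * (4-byte frame content size field), so the header occupies
+ * minInputSize + 1 + 0 + 4 bytes (10 bytes for the standard ZSTD_f_zstd1
+ * format, counting the 4-byte magic number). */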
+
+/** ZSTD_frameHeaderSize() :
+ *  srcSize must be >= ZSTD_frameHeaderSize_prefix.
+ * @return : size of the Frame Header */
+size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
+{
+    return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/** ZSTD_getFrameHeader_internal() :
+ *  decode Frame Header, or require larger `srcSize`.
+ *  note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
+ * @return : 0, `zfhPtr` is correctly filled,
+ *          >0, `srcSize` is too small; the return value is the wanted `srcSize` amount,
+ *           or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t const minInputSize = ZSTD_startingInputLength(format);
+
+    if (srcSize < minInputSize) return minInputSize;
+
+    if ( (format != ZSTD_f_zstd1_magicless)
+      && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
+        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+            /* skippable frame */
+            if (srcSize < ZSTD_skippableHeaderSize)
+                return ZSTD_skippableHeaderSize; /* magic number + frame length */
+            memset(zfhPtr, 0, sizeof(*zfhPtr));
+            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize);
+            zfhPtr->frameType = ZSTD_skippableFrame;
+            return 0;
+        }
+        return ERROR(prefix_unknown);
+    }
+
+    /* ensure there is enough `srcSize` to fully read/decode frame header */
+    {   size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
+        if (srcSize < fhsize) return fhsize;
+        zfhPtr->headerSize = (U32)fhsize;
+    }
+
+    {   BYTE const fhdByte = ip[minInputSize-1];
+        size_t pos = minInputSize;
+        U32 const dictIDSizeCode = fhdByte&3;
+        U32 const checksumFlag = (fhdByte>>2)&1;
+        U32 const singleSegment = (fhdByte>>5)&1;
+        U32 const fcsID = fhdByte>>6;
+        U64 windowSize = 0;
+        U32 dictID = 0;
+        U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
+        if ((fhdByte & 0x08) != 0)
+            return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
+
+        if (!singleSegment) {
+            BYTE const wlByte = ip[pos++];
+            U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+            if (windowLog > ZSTD_WINDOWLOG_MAX)
+                return ERROR(frameParameter_windowTooLarge);
+            windowSize = (1ULL << windowLog);
+            windowSize += (windowSize >> 3) * (wlByte&7);
+        }
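+        /* Example : wlByte = 0x0B gives windowLog = (0x0B>>3) + ZSTD_WINDOWLOG_ABSOLUTEMIN
+         * = ZSTD_WINDOWLOG_ABSOLUTEMIN + 1, and mantissa (wlByte & 7) = 3, so
+         * windowSize = (1ULL << windowLog) + 3 * ((1ULL << windowLog) >> 3),
+         * i.e. 1.375x the power of two. */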
+        switch(dictIDSizeCode)
+        {
+            default: assert(0);  /* impossible */
+            case 0 : break;
+            case 1 : dictID = ip[pos]; pos++; break;
+            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+        }
+        switch(fcsID)
+        {
+            default: assert(0);  /* impossible */
+            case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
+            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+        }
+        if (singleSegment) windowSize = frameContentSize;
+
+        zfhPtr->frameType = ZSTD_frame;
+        zfhPtr->frameContentSize = frameContentSize;
+        zfhPtr->windowSize = windowSize;
+        zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+        zfhPtr->dictID = dictID;
+        zfhPtr->checksumFlag = checksumFlag;
+    }
+    return 0;
+}
+
+/** ZSTD_getFrameHeader() :
+ *  decode Frame Header, or require larger `srcSize`.
+ *  note : this function does not consume input, it only reads it.
+ * @return : 0, `zfhPtr` is correctly filled,
+ *          >0, `srcSize` is too small; the return value is the wanted `srcSize` amount,
+ *           or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
+{
+    return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1);
+}
+
+
+/** ZSTD_getFrameContentSize() :
+ *  compatible with legacy mode
+ * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
+ *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
+ *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
+unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+    if (ZSTD_isLegacy(src, srcSize)) {
+        unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
+        return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
+    }
+#endif
+    {   ZSTD_frameHeader zfh;
+        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
+            return ZSTD_CONTENTSIZE_ERROR;
+        if (zfh.frameType == ZSTD_skippableFrame) {
+            return 0;
+        } else {
+            return zfh.frameContentSize;
+    }   }
+}
+
+/** ZSTD_findDecompressedSize() :
+ *  compatible with legacy mode
+ *  `srcSize` must be the exact length of some number of ZSTD compressed and/or
+ *      skippable frames
+ *  @return : decompressed size of the frames contained */
+unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
+{
+    unsigned long long totalDstSize = 0;
+
+    while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+        U32 const magicNumber = MEM_readLE32(src);
+
+        if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+            size_t skippableSize;
+            if (srcSize < ZSTD_skippableHeaderSize)
+                return ERROR(srcSize_wrong);
+            skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize)
+                          + ZSTD_skippableHeaderSize;
+            if (srcSize < skippableSize) {
+                return ZSTD_CONTENTSIZE_ERROR;
+            }
+
+            src = (const BYTE *)src + skippableSize;
+            srcSize -= skippableSize;
+            continue;
+        }
+
+        {   unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+            if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
+
+            /* check for overflow */
+            if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
+            totalDstSize += ret;
+        }
+        {   size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
+            if (ZSTD_isError(frameSrcSize)) {
+                return ZSTD_CONTENTSIZE_ERROR;
+            }
+
+            src = (const BYTE *)src + frameSrcSize;
+            srcSize -= frameSrcSize;
+        }
+    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+    if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
+
+    return totalDstSize;
+}
+
+/** ZSTD_getDecompressedSize() :
+*   compatible with legacy mode
+*   @return : decompressed size if known, 0 otherwise
+              note : 0 can mean any of the following :
+                   - frame content is empty
+                   - decompressed size field is not present in frame header
+                   - frame header unknown / not supported
+                   - frame header not complete (`srcSize` too small) */
+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
+{
+    unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
+    ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
+    return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
+}
+
+
+/** ZSTD_decodeFrameHeader() :
+*   `headerSize` must be the size provided by ZSTD_frameHeaderSize().
+*   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
+{
+    size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format);
+    if (ZSTD_isError(result)) return result;    /* invalid header */
+    if (result>0) return ERROR(srcSize_wrong);  /* headerSize too small */
+    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
+        return ERROR(dictionary_wrong);
+    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+    return 0;
+}
+
+
+/*-*************************************************************
+ *   Block decoding
+ ***************************************************************/
+
+/*! ZSTD_getcBlockSize() :
+*   Provides the size of the compressed block, read from the block header at `src` */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+                          blockProperties_t* bpPtr)
+{
+    if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    {   U32 const cBlockHeader = MEM_readLE24(src);
+        U32 const cSize = cBlockHeader >> 3;
+        bpPtr->lastBlock = cBlockHeader & 1;
+        bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
+        bpPtr->origSize = cSize;   /* only useful for RLE */
+        if (bpPtr->blockType == bt_rle) return 1;
+        if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
+        return cSize;
+    }
+}
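+/* Worked example : cBlockHeader = 0x324 decodes as lastBlock = 0x324 & 1 = 0,
+ * blockType = (0x324 >> 1) & 3 = 2 (bt_compressed) and cSize = 0x324 >> 3 = 100,
+ * so ZSTD_getcBlockSize() returns 100. */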
+
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
+                          const void* src, size_t srcSize)
+{
+    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                               size_t regenSize)
+{
+    if (srcSize != 1) return ERROR(srcSize_wrong);
+    if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memset(dst, *(const BYTE*)src, regenSize);
+    return regenSize;
+}
+
+/*! ZSTD_decodeLiteralsBlock() :
+ * @return : nb of bytes read from src (< srcSize)
+ *  note : symbol not declared but exposed for fullbench */
+size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    {   const BYTE* const istart = (const BYTE*) src;
+        symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
+
+        switch(litEncType)
+        {
+        case set_repeat:
+            if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
+            /* fall-through */
+        case set_compressed:
+            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+            {   size_t lhSize, litSize, litCSize;
+                U32 singleStream=0;
+                U32 const lhlCode = (istart[0] >> 2) & 3;
+                U32 const lhc = MEM_readLE32(istart);
+                switch(lhlCode)
+                {
+                case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    /* 2 - 2 - 10 - 10 */
+                    singleStream = !lhlCode;
+                    lhSize = 3;
+                    litSize  = (lhc >> 4) & 0x3FF;
+                    litCSize = (lhc >> 14) & 0x3FF;
+                    break;
+                case 2:
+                    /* 2 - 2 - 14 - 14 */
+                    lhSize = 4;
+                    litSize  = (lhc >> 4) & 0x3FFF;
+                    litCSize = lhc >> 18;
+                    break;
+                case 3:
+                    /* 2 - 2 - 18 - 18 */
+                    lhSize = 5;
+                    litSize  = (lhc >> 4) & 0x3FFFF;
+                    litCSize = (lhc >> 22) + (istart[4] << 10);
+                    break;
+                }
+                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+                if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+                if (HUF_isError((litEncType==set_repeat) ?
+                                    ( singleStream ?
+                                        HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
+                                        HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
+                                    ( singleStream ?
+                                        HUF_decompress1X2_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
+                                                                         dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2) :
+                                        HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
+                                                                           dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2))))
+                    return ERROR(corruption_detected);
+
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                dctx->litEntropy = 1;
+                if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
+                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                return litCSize + lhSize;
+            }
+
+        case set_basic:
+            {   size_t litSize, lhSize;
+                U32 const lhlCode = ((istart[0]) >> 2) & 3;
+                switch(lhlCode)
+                {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    lhSize = 1;
+                    litSize = istart[0] >> 3;
+                    break;
+                case 1:
+                    lhSize = 2;
+                    litSize = MEM_readLE16(istart) >> 4;
+                    break;
+                case 3:
+                    lhSize = 3;
+                    litSize = MEM_readLE24(istart) >> 4;
+                    break;
+                }
+
+                if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
+                    if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+                    memcpy(dctx->litBuffer, istart+lhSize, litSize);
+                    dctx->litPtr = dctx->litBuffer;
+                    dctx->litSize = litSize;
+                    memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                    return lhSize+litSize;
+                }
+                /* direct reference into compressed stream */
+                dctx->litPtr = istart+lhSize;
+                dctx->litSize = litSize;
+                return lhSize+litSize;
+            }
+
+        case set_rle:
+            {   U32 const lhlCode = ((istart[0]) >> 2) & 3;
+                size_t litSize, lhSize;
+                switch(lhlCode)
+                {
+                case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
+                    lhSize = 1;
+                    litSize = istart[0] >> 3;
+                    break;
+                case 1:
+                    lhSize = 2;
+                    litSize = MEM_readLE16(istart) >> 4;
+                    break;
+                case 3:
+                    lhSize = 3;
+                    litSize = MEM_readLE24(istart) >> 4;
+                    if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+                    break;
+                }
+                if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+                memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                return lhSize+1;
+            }
+        default:
+            return ERROR(corruption_detected);   /* impossible */
+        }
+    }
+}
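+/* Worked example (set_basic) : istart[0..1] = { 0x14, 0x03 } gives
+ * litEncType = 0x14 & 3 = 0 (set_basic) and lhlCode = (0x14 >> 2) & 3 = 1,
+ * hence lhSize = 2 and litSize = (MEM_readLE16(istart) = 0x0314) >> 4 = 49 :
+ * 49 raw literal bytes follow the 2-byte header. */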
+
+/* Default FSE distribution tables.
+ * These are pre-calculated FSE decoding tables, built from the default distributions defined in the specification :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
+ * They were generated programmatically with the following method :
+ * - start from the default distributions, present in /lib/common/zstd_internal.h
+ * - generate the tables normally, using ZSTD_buildFSETable()
+ * - print out the content of the tables
+ * - prettify the output, reported below, and test with a fuzzer to ensure correctness */
+
+/* Default FSE distribution table for Literal Lengths */
+static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
+     {  1,  1,  1, LL_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
+     /* nextState, nbAddBits, nbBits, baseVal */
+     {  0,  0,  4,    0},  { 16,  0,  4,    0},
+     { 32,  0,  5,    1},  {  0,  0,  5,    3},
+     {  0,  0,  5,    4},  {  0,  0,  5,    6},
+     {  0,  0,  5,    7},  {  0,  0,  5,    9},
+     {  0,  0,  5,   10},  {  0,  0,  5,   12},
+     {  0,  0,  6,   14},  {  0,  1,  5,   16},
+     {  0,  1,  5,   20},  {  0,  1,  5,   22},
+     {  0,  2,  5,   28},  {  0,  3,  5,   32},
+     {  0,  4,  5,   48},  { 32,  6,  5,   64},
+     {  0,  7,  5,  128},  {  0,  8,  6,  256},
+     {  0, 10,  6, 1024},  {  0, 12,  6, 4096},
+     { 32,  0,  4,    0},  {  0,  0,  4,    1},
+     {  0,  0,  5,    2},  { 32,  0,  5,    4},
+     {  0,  0,  5,    5},  { 32,  0,  5,    7},
+     {  0,  0,  5,    8},  { 32,  0,  5,   10},
+     {  0,  0,  5,   11},  {  0,  0,  6,   13},
+     { 32,  1,  5,   16},  {  0,  1,  5,   18},
+     { 32,  1,  5,   22},  {  0,  2,  5,   24},
+     { 32,  3,  5,   32},  {  0,  3,  5,   40},
+     {  0,  6,  4,   64},  { 16,  6,  4,   64},
+     { 32,  7,  5,  128},  {  0,  9,  6,  512},
+     {  0, 11,  6, 2048},  { 48,  0,  4,    0},
+     { 16,  0,  4,    1},  { 32,  0,  5,    2},
+     { 32,  0,  5,    3},  { 32,  0,  5,    5},
+     { 32,  0,  5,    6},  { 32,  0,  5,    8},
+     { 32,  0,  5,    9},  { 32,  0,  5,   11},
+     { 32,  0,  5,   12},  {  0,  0,  6,   15},
+     { 32,  1,  5,   18},  { 32,  1,  5,   20},
+     { 32,  2,  5,   24},  { 32,  2,  5,   28},
+     { 32,  3,  5,   40},  { 32,  4,  5,   48},
+     {  0, 16,  6,65536},  {  0, 15,  6,32768},
+     {  0, 14,  6,16384},  {  0, 13,  6, 8192},
+};   /* LL_defaultDTable */
+
+/* Default FSE distribution table for Offset Codes */
+static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
+    {  1,  1,  1, OF_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
+    /* nextState, nbAddBits, nbBits, baseVal */
+    {  0,  0,  5,    0},     {  0,  6,  4,   61},
+    {  0,  9,  5,  509},     {  0, 15,  5,32765},
+    {  0, 21,  5,2097149},   {  0,  3,  5,    5},
+    {  0,  7,  4,  125},     {  0, 12,  5, 4093},
+    {  0, 18,  5,262141},    {  0, 23,  5,8388605},
+    {  0,  5,  5,   29},     {  0,  8,  4,  253},
+    {  0, 14,  5,16381},     {  0, 20,  5,1048573},
+    {  0,  2,  5,    1},     { 16,  7,  4,  125},
+    {  0, 11,  5, 2045},     {  0, 17,  5,131069},
+    {  0, 22,  5,4194301},   {  0,  4,  5,   13},
+    { 16,  8,  4,  253},     {  0, 13,  5, 8189},
+    {  0, 19,  5,524285},    {  0,  1,  5,    1},
+    { 16,  6,  4,   61},     {  0, 10,  5, 1021},
+    {  0, 16,  5,65533},     {  0, 28,  5,268435453},
+    {  0, 27,  5,134217725}, {  0, 26,  5,67108861},
+    {  0, 25,  5,33554429},  {  0, 24,  5,16777213},
+};   /* OF_defaultDTable */
+
+
+/* Default FSE distribution table for Match Lengths */
+static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
+    {  1,  1,  1, ML_DEFAULTNORMLOG},  /* header : fastMode, tableLog */
+    /* nextState, nbAddBits, nbBits, baseVal */
+    {  0,  0,  6,    3},  {  0,  0,  4,    4},
+    { 32,  0,  5,    5},  {  0,  0,  5,    6},
+    {  0,  0,  5,    8},  {  0,  0,  5,    9},
+    {  0,  0,  5,   11},  {  0,  0,  6,   13},
+    {  0,  0,  6,   16},  {  0,  0,  6,   19},
+    {  0,  0,  6,   22},  {  0,  0,  6,   25},
+    {  0,  0,  6,   28},  {  0,  0,  6,   31},
+    {  0,  0,  6,   34},  {  0,  1,  6,   37},
+    {  0,  1,  6,   41},  {  0,  2,  6,   47},
+    {  0,  3,  6,   59},  {  0,  4,  6,   83},
+    {  0,  7,  6,  131},  {  0,  9,  6,  515},
+    { 16,  0,  4,    4},  {  0,  0,  4,    5},
+    { 32,  0,  5,    6},  {  0,  0,  5,    7},
+    { 32,  0,  5,    9},  {  0,  0,  5,   10},
+    {  0,  0,  6,   12},  {  0,  0,  6,   15},
+    {  0,  0,  6,   18},  {  0,  0,  6,   21},
+    {  0,  0,  6,   24},  {  0,  0,  6,   27},
+    {  0,  0,  6,   30},  {  0,  0,  6,   33},
+    {  0,  1,  6,   35},  {  0,  1,  6,   39},
+    {  0,  2,  6,   43},  {  0,  3,  6,   51},
+    {  0,  4,  6,   67},  {  0,  5,  6,   99},
+    {  0,  8,  6,  259},  { 32,  0,  4,    4},
+    { 48,  0,  4,    4},  { 16,  0,  4,    5},
+    { 32,  0,  5,    7},  { 32,  0,  5,    8},
+    { 32,  0,  5,   10},  { 32,  0,  5,   11},
+    {  0,  0,  6,   14},  {  0,  0,  6,   17},
+    {  0,  0,  6,   20},  {  0,  0,  6,   23},
+    {  0,  0,  6,   26},  {  0,  0,  6,   29},
+    {  0,  0,  6,   32},  {  0, 16,  6,65539},
+    {  0, 15,  6,32771},  {  0, 14,  6,16387},
+    {  0, 13,  6, 8195},  {  0, 12,  6, 4099},
+    {  0, 11,  6, 2051},  {  0, 10,  6, 1027},
+};   /* ML_defaultDTable */
+
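+/* Reading these tables, using the decode loop below as reference : the ML entry
+ * {  0,  1,  6,   37} yields baseVal 37 plus 1 extra bit read from the
+ * bitstream, i.e. a match length of 37 or 38; the decoder then reads nbBits=6
+ * bits and adds them to nextState=0 to select the next table cell
+ * (see ZSTD_decodeSequence() and ZSTD_updateFseState()). */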
+
+static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
+{
+    void* ptr = dt;
+    ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
+    ZSTD_seqSymbol* const cell = dt + 1;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->nbBits = 0;
+    cell->nextState = 0;
+    assert(nbAddBits < 255);
+    cell->nbAdditionalBits = (BYTE)nbAddBits;
+    cell->baseValue = baseValue;
+}
+
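+/* Note : the RLE table built above is a single cell with tableLog=0 and
+ * nbBits=0 : the FSE state never advances and consumes no bits, so every
+ * sequence in the block decodes the same baseValue / nbAdditionalBits pair. */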
+
+/* ZSTD_buildFSETable() :
+ * generate FSE decoding table for one symbol (ll, ml or off) */
+static void
+ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
+    const short* normalizedCounter, unsigned maxSymbolValue,
+    const U32* baseValue, const U32* nbAdditionalBits,
+    unsigned tableLog)
+{
+    ZSTD_seqSymbol* const tableDecode = dt+1;
+    U16 symbolNext[MaxSeq+1];
+
+    U32 const maxSV1 = maxSymbolValue + 1;
+    U32 const tableSize = 1 << tableLog;
+    U32 highThreshold = tableSize-1;
+
+    /* Sanity Checks */
+    assert(maxSymbolValue <= MaxSeq);
+    assert(tableLog <= MaxFSELog);
+
+    /* Init, lay down lowprob symbols */
+    {   ZSTD_seqSymbol_header DTableH;
+        DTableH.tableLog = tableLog;
+        DTableH.fastMode = 1;
+        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
+            U32 s;
+            for (s=0; s<maxSV1; s++) {
+                if (normalizedCounter[s]==-1) {
+                    tableDecode[highThreshold--].baseValue = s;
+                    symbolNext[s] = 1;
+                } else {
+                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+                    symbolNext[s] = normalizedCounter[s];
+        }   }   }
+        memcpy(dt, &DTableH, sizeof(DTableH));
+    }
+
+    /* Spread symbols */
+    {   U32 const tableMask = tableSize-1;
+        U32 const step = FSE_TABLESTEP(tableSize);
+        U32 s, position = 0;
+        for (s=0; s<maxSV1; s++) {
+            int i;
+            for (i=0; i<normalizedCounter[s]; i++) {
+                tableDecode[position].baseValue = s;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }   }
+        assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+    }
+
+    /* Build Decoding table */
+    {   U32 u;
+        for (u=0; u<tableSize; u++) {
+            U32 const symbol = tableDecode[u].baseValue;
+            U32 const nextState = symbolNext[symbol]++;
+            tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
+            tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+            assert(nbAdditionalBits[symbol] < 255);
+            tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
+            tableDecode[u].baseValue = baseValue[symbol];
+    }   }
+}
+
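+/* Spread-step sketch, assuming the usual FSE_TABLESTEP definition
+ * ((tableSize>>1) + (tableSize>>3) + 3) : with tableLog=2 (tableSize=4),
+ * step=5, so positions visit 0,1,2,3 and wrap back to 0, touching each cell
+ * exactly once, which is what the assert(position == 0) in the spread step
+ * checks. Cells above highThreshold are reserved for low-probability (-1)
+ * symbols, laid down first. */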
+
+/*! ZSTD_buildSeqTable() :
+ * @return : nb bytes read from src,
+ *           or an error code if it fails */
+static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
+                                 symbolEncodingType_e type, U32 max, U32 maxLog,
+                                 const void* src, size_t srcSize,
+                                 const U32* baseValue, const U32* nbAdditionalBits,
+                                 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable)
+{
+    switch(type)
+    {
+    case set_rle :
+        if (!srcSize) return ERROR(srcSize_wrong);
+        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+        {   U32 const symbol = *(const BYTE*)src;
+            U32 const baseline = baseValue[symbol];
+            U32 const nbBits = nbAdditionalBits[symbol];
+            ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
+        }
+        *DTablePtr = DTableSpace;
+        return 1;
+    case set_basic :
+        *DTablePtr = defaultTable;
+        return 0;
+    case set_repeat:
+        if (!flagRepeatTable) return ERROR(corruption_detected);
+        return 0;
+    case set_compressed :
+        {   U32 tableLog;
+            S16 norm[MaxSeq+1];
+            size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
+            if (FSE_isError(headerSize)) return ERROR(corruption_detected);
+            if (tableLog > maxLog) return ERROR(corruption_detected);
+            ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
+            *DTablePtr = DTableSpace;
+            return headerSize;
+        }
+    default :   /* impossible */
+        assert(0);
+        return ERROR(GENERIC);
+    }
+}
+
+static const U32 LL_base[MaxLL+1] = {
+                 0,    1,    2,     3,     4,     5,     6,      7,
+                 8,    9,   10,    11,    12,    13,    14,     15,
+                16,   18,   20,    22,    24,    28,    32,     40,
+                48,   64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+                0x2000, 0x4000, 0x8000, 0x10000 };
+
+static const U32 OF_base[MaxOff+1] = {
+                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
+                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
+                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
+
+static const U32 OF_bits[MaxOff+1] = {
+                     0,  1,  2,  3,  4,  5,  6,  7,
+                     8,  9, 10, 11, 12, 13, 14, 15,
+                    16, 17, 18, 19, 20, 21, 22, 23,
+                    24, 25, 26, 27, 28, 29, 30, 31 };
+
+static const U32 ML_base[MaxML+1] = {
+                     3,  4,  5,    6,     7,     8,     9,    10,
+                    11, 12, 13,   14,    15,    16,    17,    18,
+                    19, 20, 21,   22,    23,    24,    25,    26,
+                    27, 28, 29,   30,    31,    32,    33,    34,
+                    35, 37, 39,   41,    43,    47,    51,    59,
+                    67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+                    0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+
+size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
+                             const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip = istart;
+    DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
+
+    /* check */
+    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    {   int nbSeq = *ip++;
+        if (!nbSeq) { *nbSeqPtr=0; return 1; }
+        if (nbSeq > 0x7F) {
+            if (nbSeq == 0xFF) {
+                if (ip+2 > iend) return ERROR(srcSize_wrong);
+                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+            } else {
+                if (ip >= iend) return ERROR(srcSize_wrong);
+                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+            }
+        }
+        *nbSeqPtr = nbSeq;
+    }
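+    /* nbSeq encoding, worked examples (LONGNBSEQ is 0x7F00 in this version) :
+     *   first byte 0x05              -> nbSeq = 5
+     *   bytes 0x81 0x2A              -> nbSeq = ((0x81-0x80)<<8) + 0x2A = 298
+     *   byte 0xFF + 2-byte LE value  -> nbSeq = value + LONGNBSEQ */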
+
+    /* FSE table descriptors */
+    if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
+    {   symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
+        symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
+        symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
+        ip++;
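+        /* Descriptor byte example : 0x6C = 01 10 11 00b selects
+         * LLtype=1 (set_rle), OFtype=2 (set_compressed), MLtype=3 (set_repeat);
+         * the 2 lowest bits are unused. Enum order (zstd_internal.h) :
+         * set_basic=0, set_rle=1, set_compressed=2, set_repeat=3. */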
+
+        /* Build DTables */
+        {   size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
+                                                      LLtype, MaxLL, LLFSELog,
+                                                      ip, iend-ip,
+                                                      LL_base, LL_bits,
+                                                      LL_defaultDTable, dctx->fseEntropy);
+            if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
+            ip += llhSize;
+        }
+
+        {   size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
+                                                      OFtype, MaxOff, OffFSELog,
+                                                      ip, iend-ip,
+                                                      OF_base, OF_bits,
+                                                      OF_defaultDTable, dctx->fseEntropy);
+            if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
+            ip += ofhSize;
+        }
+
+        {   size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
+                                                      MLtype, MaxML, MLFSELog,
+                                                      ip, iend-ip,
+                                                      ML_base, ML_bits,
+                                                      ML_defaultDTable, dctx->fseEntropy);
+            if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
+            ip += mlhSize;
+        }
+    }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t matchLength;
+    size_t offset;
+    const BYTE* match;
+} seq_t;
+
+typedef struct {
+    size_t state;
+    const ZSTD_seqSymbol* table;
+} ZSTD_fseState;
+
+typedef struct {
+    BIT_DStream_t DStream;
+    ZSTD_fseState stateLL;
+    ZSTD_fseState stateOffb;
+    ZSTD_fseState stateML;
+    size_t prevOffset[ZSTD_REP_NUM];
+    const BYTE* prefixStart;
+    const BYTE* dictEnd;
+    size_t pos;
+} seqState_t;
+
+
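+/* ZSTD_execSequenceLast7() :
+ * handles sequences ending within WILDCOPY_OVERLENGTH bytes of oend, where the
+ * 8-byte wildcopy used on the fast path could write past the output buffer;
+ * near the boundary it falls back to byte-by-byte copies. */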
+FORCE_NOINLINE
+size_t ZSTD_execSequenceLast7(BYTE* op,
+                              BYTE* const oend, seq_t sequence,
+                              const BYTE** litPtr, const BYTE* const litLimit,
+                              const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if (oLitEnd <= oend_w) return ERROR(GENERIC);   /* Precondition : this helper is only called when oLitEnd > oend_w */
+
+    /* copy literals */
+    if (op < oend_w) {
+        ZSTD_wildcopy(op, *litPtr, oend_w - op);
+        *litPtr += oend_w - op;
+        op = oend_w;
+    }
+    while (op < oLitEnd) *op++ = *(*litPtr)++;
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+    }   }
+    while (op < oMatchEnd) *op++ = *match++;
+    return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequence(BYTE* op,
+                         BYTE* const oend, seq_t sequence,
+                         const BYTE** litPtr, const BYTE* const litLimit,
+                         const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
+
+    /* copy Literals */
+    ZSTD_copy8(op, *litPtr);
+    if (sequence.litLength > 8)
+        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix -> go into extDict */
+        if (sequence.offset > (size_t)(oLitEnd - vBase))
+            return ERROR(corruption_detected);
+        match = dictEnd + (match - base);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              U32 i;
+              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
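+        /* An 8-byte copy would overlap its own output when offset < 8 :
+         * copy the first 8 bytes in two overlap-safe steps (4 single bytes,
+         * then one 4-byte copy from an adjusted source), after which match is
+         * rewound so the remaining copy distance is a safe 8 bytes. */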
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+
+HINT_INLINE
+size_t ZSTD_execSequenceLong(BYTE* op,
+                             BYTE* const oend, seq_t sequence,
+                             const BYTE** litPtr, const BYTE* const litLimit,
+                             const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = sequence.match;
+
+    /* check */
+    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+    if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
+
+    /* copy Literals */
+    ZSTD_copy8(op, *litPtr);  /* note : op <= oLitEnd <= oend_w == oend - 8 */
+    if (sequence.litLength > 8)
+        ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = prefixStart;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              U32 i;
+              for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
+              return sequenceLength;
+            }
+    }   }
+    assert(op <= oend_w);
+    assert(sequence.matchLength >= MINMATCH);
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTD_wildcopy(op, match, oend_w - op);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+static void
+ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
+{
+    const void* ptr = dt;
+    const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
+    DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
+    DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
+                (U32)DStatePtr->state, DTableH->tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+FORCE_INLINE_TEMPLATE void
+ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
+{
+    ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    size_t const lowBits = BIT_readBits(bitD, nbBits);
+    DStatePtr->state = DInfo.nextState + lowBits;
+}
+
+/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
+ * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
+ * bits before reloading. This value is the maximum number of bits we read
+ * after reloading when we are decoding long offsets.
+ */
+#define LONG_OFFSETS_MAX_EXTRA_BITS_32                       \
+    (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32       \
+        ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32  \
+        : 0)
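+/* With this version's constants (ZSTD_WINDOWLOG_MAX_32 = 30,
+ * STREAM_ACCUMULATOR_MIN_32 = 25), this evaluates to 5, as confirmed by the
+ * static assert in ZSTD_decodeSequence(). */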
+
+typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
+{
+    seq_t seq;
+    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+    U32 const totalBits = llBits+mlBits+ofBits;
+    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofBits)
+            offset = 0;
+        else {
+            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+            assert(ofBits <= MaxOff);
+            if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
+                U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
+                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+                BIT_reloadDStream(&seqState->DStream);
+                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+                assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32);   /* to avoid another reload */
+            } else {
+                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+            }
+        }
+
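+        /* Repeat-offset handling : when ofBits <= 1, the decoded value selects
+         * the recent-offsets history : 1 -> prevOffset[1], 2 -> prevOffset[2],
+         * 3 -> prevOffset[0]-1, and 0 (possible only when litLength != 0)
+         * repeats prevOffset[0]. */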
+        if (ofBits <= 1) {
+            offset += (llBase==0);
+            if (offset) {
+                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {  /* offset == 0 */
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = mlBase
+                    + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0);  /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+        BIT_reloadDStream(&seqState->DStream);
+    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+        BIT_reloadDStream(&seqState->DStream);
+    /* Ensure there are enough bits to read the rest of the data in 64-bit mode. */
+    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+    seq.litLength = llBase
+                  + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0);    /* <=  16 bits */
+    if (MEM_32bits())
+        BIT_reloadDStream(&seqState->DStream);
+
+    DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
+                (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
+
+    /* ANS state update */
+    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
+    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
+    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
+    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
+
+    return seq;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize, int nbSeq,
+                         const ZSTD_longOffset_e isLongOffset)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+    DEBUGLOG(5, "ZSTD_decompressSequences");
+
+    /* Regen sequences */
+    if (nbSeq) {
+        seqState_t seqState;
+        dctx->fseEntropy = 1;
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
+            nbSeq--;
+            {   seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
+                size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+                DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
+                if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+                op += oneSeqSize;
+        }   }
+
+        /* check if reached exact end */
+        DEBUGLOG(5, "ZSTD_decompressSequences: after decode loop, remaining nbSeq : %i", nbSeq);
+        if (nbSeq) return ERROR(corruption_detected);
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+
+
+FORCE_INLINE_TEMPLATE seq_t
+ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
+{
+    seq_t seq;
+    U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
+    U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
+    U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
+    U32 const totalBits = llBits+mlBits+ofBits;
+    U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
+    U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
+    U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofBits)
+            offset = 0;
+        else {
+            ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
+            ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
+            assert(ofBits <= MaxOff);
+            if (MEM_32bits() && longOffsets) {
+                U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
+                offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
+                if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
+                if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
+            } else {
+                offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits);   /* <=  (ZSTD_WINDOWLOG_MAX-1) bits */
+                if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
+            }
+        }
+
+        if (ofBits <= 1) {
+            offset += (llBase==0);
+            if (offset) {
+                size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
+                temp += !temp;   /* 0 is not valid; input is corrupted; force offset to 1 */
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0);  /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
+        BIT_reloadDStream(&seqState->DStream);
+    if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
+        BIT_reloadDStream(&seqState->DStream);
+    /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
+    ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
+
+    seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0);    /* <=  16 bits */
+    if (MEM_32bits())
+        BIT_reloadDStream(&seqState->DStream);
+
+    {   size_t const pos = seqState->pos + seq.litLength;
+        const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
+        seq.match = matchBase + pos - seq.offset;  /* note : this operation can overflow when seq.offset is too large, which can only happen when the input is corrupted.
+                                                    * No consequence though : no memory access occurs, and the overly large offset is detected in ZSTD_execSequenceLong() */
+        seqState->pos = pos + seq.matchLength;
+    }
+
+    /* ANS state update */
+    ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream);    /* <=  9 bits */
+    ZSTD_updateFseState(&seqState->stateML, &seqState->DStream);    /* <=  9 bits */
+    if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);    /* <= 18 bits */
+    ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream);  /* <=  8 bits */
+
+    return seq;
+}
+
+FORCE_INLINE_TEMPLATE size_t
+ZSTD_decompressSequencesLong_body(
+                               ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize, int nbSeq,
+                         const ZSTD_longOffset_e isLongOffset)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    const BYTE* const prefixStart = (const BYTE*) (dctx->base);
+    const BYTE* const dictStart = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+    /* Regen sequences */
+    if (nbSeq) {
+#define STORED_SEQS 4
+#define STOSEQ_MASK (STORED_SEQS-1)
+#define ADVANCED_SEQS 4
+        seq_t sequences[STORED_SEQS];
+        int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
+        seqState_t seqState;
+        int seqNb;
+        dctx->fseEntropy = 1;
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
+        seqState.prefixStart = prefixStart;
+        seqState.pos = (size_t)(op-prefixStart);
+        seqState.dictEnd = dictEnd;
+        CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
+        ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
+        ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
+        ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
+
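+        /* Software pipeline : sequences are decoded ADVANCED_SEQS (=4) ahead
+         * of execution and kept in a small ring buffer, so the PREFETCH()
+         * issued on each newly decoded match pointer has time to bring a
+         * distant match into cache before ZSTD_execSequenceLong() copies it. */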
+        /* prepare in advance */
+        for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
+            sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+        }
+        if (seqNb<seqAdvance) return ERROR(corruption_detected);
+
+        /* decode and decompress */
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
+            seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
+            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            PREFETCH(sequence.match);  /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
+            sequences[seqNb&STOSEQ_MASK] = sequence;
+            op += oneSeqSize;
+        }
+        if (seqNb<nbSeq) return ERROR(corruption_detected);
+
+        /* finish queue */
+        seqNb -= seqAdvance;
+        for ( ; seqNb<nbSeq ; seqNb++) {
+            size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
+#undef STORED_SEQS
+#undef STOSEQ_MASK
+#undef ADVANCED_SEQS
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+static size_t
+ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+
+
+#if DYNAMIC_BMI2
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+static TARGET_ATTRIBUTE("bmi2") size_t
+ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                           const void* seqStart, size_t seqSize, int nbSeq,
+                           const ZSTD_longOffset_e isLongOffset)
+{
+    return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+#endif
+
+typedef size_t (*ZSTD_decompressSequences_t)(
+    ZSTD_DCtx *dctx, void *dst, size_t maxDstSize,
+    const void *seqStart, size_t seqSize, int nbSeq,
+    const ZSTD_longOffset_e isLongOffset);
+
+static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
+                                const void* seqStart, size_t seqSize, int nbSeq,
+                                const ZSTD_longOffset_e isLongOffset)
+{
+    DEBUGLOG(5, "ZSTD_decompressSequences");
+#if DYNAMIC_BMI2
+    if (dctx->bmi2) {
+        return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+    }
+#endif
+  return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
+                                void* dst, size_t maxDstSize,
+                                const void* seqStart, size_t seqSize, int nbSeq,
+                                const ZSTD_longOffset_e isLongOffset)
+{
+    DEBUGLOG(5, "ZSTD_decompressSequencesLong");
+#if DYNAMIC_BMI2
+    if (dctx->bmi2) {
+        return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+    }
+#endif
+  return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
+}
+
+/* ZSTD_getLongOffsetsShare() :
+ * condition : offTable must be valid
+ * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
+ *           compared to maximum possible of (1<<OffFSELog) */
+static unsigned
+ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
+{
+    const void* ptr = offTable;
+    U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
+    const ZSTD_seqSymbol* table = offTable + 1;
+    U32 const max = 1 << tableLog;
+    U32 u, total = 0;
+    DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
+
+    assert(max <= (1 << OffFSELog));  /* max not too large */
+    for (u=0; u<max; u++) {
+        if (table[u].nbAdditionalBits > 22) total += 1;
+    }
+
+    assert(tableLog <= OffFSELog);
+    total <<= (OffFSELog - tableLog);  /* scale to OffFSELog */
+
+    return total;
+}
+
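+/* Worked example, using OF_defaultDTable above : 6 of its 32 cells (tableLog 5)
+ * carry more than 22 additional bits, so the share is 6 << (8-5) = 48 out of
+ * 256 (18.75%), which clears both minShare thresholds (7 and 20) used in
+ * ZSTD_decompressBlock_internal(). */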
+
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize, const int frame)
+{   /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+    /* isLongOffset must be true if there are long offsets.
+     * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
+     * We don't expect that to be the case in 64-bit mode.
+     * In block mode, the window size is not known, so we have to be conservative (note : it could be evaluated from current - lowLimit).
+     */
+    ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
+    DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
+
+    if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
+
+    /* Decode literals section */
+    {   size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+        DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
+        if (ZSTD_isError(litCSize)) return litCSize;
+        ip += litCSize;
+        srcSize -= litCSize;
+    }
+
+    /* Build Decoding Tables */
+    {   int nbSeq;
+        size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
+        if (ZSTD_isError(seqHSize)) return seqHSize;
+        ip += seqHSize;
+        srcSize -= seqHSize;
+
+        if ( (!frame || dctx->fParams.windowSize > (1<<24))
+          && (nbSeq>0) ) {  /* could probably use a larger nbSeq limit */
+            U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
+            U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
+            if (shareLongOffsets >= minShare)
+                return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+        }
+
+        return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
+    }
+}
+
+
+static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd) {   /* not contiguous */
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+        dctx->base = dst;
+        dctx->previousDstEnd = dst;
+    }
+}
+
+size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    size_t dSize;
+    ZSTD_checkContinuity(dctx, dst);
+    dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
+    dctx->previousDstEnd = (char*)dst + dSize;
+    return dSize;
+}
+
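+/* Minimal block-level usage sketch (illustrative only; per the block-API notes
+ * in zstd.h, the context must be initialized first, e.g. with
+ * ZSTD_decompressBegin(), and blocks must be decoded in order) :
+ *
+ *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *     size_t const initErr = ZSTD_decompressBegin(dctx);
+ *     if (!ZSTD_isError(initErr)) {
+ *         size_t const dSize = ZSTD_decompressBlock(dctx, dst, dstCapacity,
+ *                                                   block, blockSize);
+ *         if (ZSTD_isError(dSize)) { handle error }
+ *     }
+ *     ZSTD_freeDCtx(dctx);
+ */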
+
+/** ZSTD_insertBlock() :
+    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+    ZSTD_checkContinuity(dctx, blockStart);
+    dctx->previousDstEnd = (const char*)blockStart + blockSize;
+    return blockSize;
+}
+
+
+static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+{
+    if (length > dstCapacity) return ERROR(dstSize_tooSmall);
+    memset(dst, byte, length);
+    return length;
+}
+
+/** ZSTD_findFrameCompressedSize() :
+ *  compatible with legacy mode
+ *  `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
+ *  `srcSize` must be at least as large as the frame contained
+ *  @return : the compressed size of the frame starting at `src` */
+size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+    if (ZSTD_isLegacy(src, srcSize))
+        return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+#endif
+    if ( (srcSize >= ZSTD_skippableHeaderSize)
+      && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
+        return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize);
+    } else {
+        const BYTE* ip = (const BYTE*)src;
+        const BYTE* const ipstart = ip;
+        size_t remainingSize = srcSize;
+        ZSTD_frameHeader zfh;
+
+        /* Extract Frame Header */
+        {   size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
+            if (ZSTD_isError(ret)) return ret;
+            if (ret > 0) return ERROR(srcSize_wrong);
+        }
+
+        ip += zfh.headerSize;
+        remainingSize -= zfh.headerSize;
+
+        /* Loop on each block */
+        while (1) {
+            blockProperties_t blockProperties;
+            size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+            if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+            if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
+                return ERROR(srcSize_wrong);
+
+            ip += ZSTD_blockHeaderSize + cBlockSize;
+            remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
+
+            if (blockProperties.lastBlock) break;
+        }
+
+        if (zfh.checksumFlag) {   /* Final frame content checksum */
+            if (remainingSize < 4) return ERROR(srcSize_wrong);
+            ip += 4;
+            remainingSize -= 4;
+        }
+
+        return ip - ipstart;
+    }
+}
+
+/*! ZSTD_decompressFrame() :
+*   @dctx must be properly initialized */
+static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
+                                   void* dst, size_t dstCapacity,
+                             const void** srcPtr, size_t *srcSizePtr)
+{
+    const BYTE* ip = (const BYTE*)(*srcPtr);
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    size_t remainingSize = *srcSizePtr;
+
+    /* check */
+    if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
+        return ERROR(srcSize_wrong);
+
+    /* Frame Header */
+    {   size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
+        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+        if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
+            return ERROR(srcSize_wrong);
+        CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1) {
+        size_t decodedSize;
+        blockProperties_t blockProperties;
+        size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
+            break;
+        case bt_raw :
+            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
+            break;
+        case bt_reserved :
+        default:
+            return ERROR(corruption_detected);
+        }
+
+        if (ZSTD_isError(decodedSize)) return decodedSize;
+        if (dctx->fParams.checksumFlag)
+            XXH64_update(&dctx->xxhState, op, decodedSize);
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+        if (blockProperties.lastBlock) break;
+    }
+
+    if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+        if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
+            return ERROR(corruption_detected);
+    }   }
+    if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
+        U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
+        U32 checkRead;
+        if (remainingSize<4) return ERROR(checksum_wrong);
+        checkRead = MEM_readLE32(ip);
+        if (checkRead != checkCalc) return ERROR(checksum_wrong);
+        ip += 4;
+        remainingSize -= 4;
+    }
+
+    /* Allow caller to get size read */
+    *srcPtr = ip;
+    *srcSizePtr = remainingSize;
+    return op-ostart;
+}
+
+static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
+
+static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
+                                        void* dst, size_t dstCapacity,
+                                  const void* src, size_t srcSize,
+                                  const void* dict, size_t dictSize,
+                                  const ZSTD_DDict* ddict)
+{
+    void* const dststart = dst;
+    assert(dict==NULL || ddict==NULL);  /* either dict or ddict set, not both */
+
+    if (ddict) {
+        dict = ZSTD_DDictDictContent(ddict);
+        dictSize = ZSTD_DDictDictSize(ddict);
+    }
+
+    while (srcSize >= ZSTD_frameHeaderSize_prefix) {
+        U32 magicNumber;
+
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
+        if (ZSTD_isLegacy(src, srcSize)) {
+            size_t decodedSize;
+            size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
+            if (ZSTD_isError(frameSize)) return frameSize;
+            /* legacy support is not compatible with static dctx */
+            if (dctx->staticSize) return ERROR(memory_allocation);
+
+            decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
+
+            dst = (BYTE*)dst + decodedSize;
+            dstCapacity -= decodedSize;
+
+            src = (const BYTE*)src + frameSize;
+            srcSize -= frameSize;
+
+            continue;
+        }
+#endif
+
+        magicNumber = MEM_readLE32(src);
+        DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
+                    (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
+        if (magicNumber != ZSTD_MAGICNUMBER) {
+            if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
+                size_t skippableSize;
+                if (srcSize < ZSTD_skippableHeaderSize)
+                    return ERROR(srcSize_wrong);
+                skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize)
+                              + ZSTD_skippableHeaderSize;
+                if (srcSize < skippableSize) return ERROR(srcSize_wrong);
+
+                src = (const BYTE *)src + skippableSize;
+                srcSize -= skippableSize;
+                continue;
+            }
+            return ERROR(prefix_unknown);
+        }
+
+        if (ddict) {
+            /* we were called from ZSTD_decompress_usingDDict */
+            CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
+        } else {
+            /* this will initialize correctly with no dict if dict == NULL, so
+             * use this in all cases but ddict */
+            CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
+        }
+        ZSTD_checkContinuity(dctx, dst);
+
+        {   const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
+                                                    &src, &srcSize);
+            if (ZSTD_isError(res)) return res;
+            /* no need to bound check, ZSTD_decompressFrame already has */
+            dst = (BYTE*)dst + res;
+            dstCapacity -= res;
+        }
+    }  /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
+
+    if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
+
+    return (BYTE*)dst - (BYTE*)dststart;
+}
+
+size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                           const void* src, size_t srcSize,
+                           const void* dict, size_t dictSize)
+{
+    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
+}
+
+
+size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
+    size_t regenSize;
+    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+    if (dctx==NULL) return ERROR(memory_allocation);
+    regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+    ZSTD_freeDCtx(dctx);
+    return regenSize;
+#else   /* stack mode */
+    ZSTD_DCtx dctx;
+    return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
+
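+/* One-shot usage sketch (illustrative) : size the destination from the frame
+ * header when the producer recorded the content size, then decompress in a
+ * single call :
+ *
+ *     unsigned long long const rSize = ZSTD_getFrameContentSize(src, srcSize);
+ *     if (rSize != ZSTD_CONTENTSIZE_UNKNOWN && rSize != ZSTD_CONTENTSIZE_ERROR) {
+ *         size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, src, srcSize);
+ *         if (ZSTD_isError(dSize)) { handle error }
+ *     }
+ */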
+
+/*-**************************************
+*   Advanced Streaming Decompression API
+*   Bufferless and synchronous
+****************************************/
+size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
+
+ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
+    switch(dctx->stage)
+    {
+    default:   /* should not happen */
+        assert(0);
+    case ZSTDds_getFrameHeaderSize:
+    case ZSTDds_decodeFrameHeader:
+        return ZSTDnit_frameHeader;
+    case ZSTDds_decodeBlockHeader:
+        return ZSTDnit_blockHeader;
+    case ZSTDds_decompressBlock:
+        return ZSTDnit_block;
+    case ZSTDds_decompressLastBlock:
+        return ZSTDnit_lastBlock;
+    case ZSTDds_checkChecksum:
+        return ZSTDnit_checksum;
+    case ZSTDds_decodeSkippableHeader:
+    case ZSTDds_skipFrame:
+        return ZSTDnit_skippableFrame;
+    }
+}
+
+static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
+
+/** ZSTD_decompressContinue() :
+ *  srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
+ *  @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+ *            or an error code, which can be tested using ZSTD_isError() */
+size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
+    /* Sanity check */
+    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);  /* not allowed */
+    if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
+
+    switch (dctx->stage)
+    {
+    case ZSTDds_getFrameHeaderSize :
+        assert(src != NULL);
+        if (dctx->format == ZSTD_f_zstd1) {  /* allows header */
+            assert(srcSize >= ZSTD_frameIdSize);  /* to read skippable magic number */
+            if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {        /* skippable frame */
+                memcpy(dctx->headerBuffer, src, srcSize);
+                dctx->expected = ZSTD_skippableHeaderSize - srcSize;  /* remaining to load to get full skippable frame header */
+                dctx->stage = ZSTDds_decodeSkippableHeader;
+                return 0;
+        }   }
+        dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
+        if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
+        memcpy(dctx->headerBuffer, src, srcSize);
+        dctx->expected = dctx->headerSize - srcSize;
+        dctx->stage = ZSTDds_decodeFrameHeader;
+        return 0;
+
+    case ZSTDds_decodeFrameHeader:
+        assert(src != NULL);
+        memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
+        CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
+        dctx->expected = ZSTD_blockHeaderSize;
+        dctx->stage = ZSTDds_decodeBlockHeader;
+        return 0;
+
+    case ZSTDds_decodeBlockHeader:
+        {   blockProperties_t bp;
+            size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+            if (ZSTD_isError(cBlockSize)) return cBlockSize;
+            dctx->expected = cBlockSize;
+            dctx->bType = bp.blockType;
+            dctx->rleSize = bp.origSize;
+            if (cBlockSize) {
+                dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
+                return 0;
+            }
+            /* empty block */
+            if (bp.lastBlock) {
+                if (dctx->fParams.checksumFlag) {
+                    dctx->expected = 4;
+                    dctx->stage = ZSTDds_checkChecksum;
+                } else {
+                    dctx->expected = 0; /* end of frame */
+                    dctx->stage = ZSTDds_getFrameHeaderSize;
+                }
+            } else {
+                dctx->expected = ZSTD_blockHeaderSize;  /* jump to next header */
+                dctx->stage = ZSTDds_decodeBlockHeader;
+            }
+            return 0;
+        }
+
+    case ZSTDds_decompressLastBlock:
+    case ZSTDds_decompressBlock:
+        DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
+        {   size_t rSize;
+            switch(dctx->bType)
+            {
+            case bt_compressed:
+                DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
+                rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
+                break;
+            case bt_raw :
+                rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
+                break;
+            case bt_rle :
+                rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
+                break;
+            case bt_reserved :   /* should never happen */
+            default:
+                return ERROR(corruption_detected);
+            }
+            if (ZSTD_isError(rSize)) return rSize;
+            DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
+            dctx->decodedSize += rSize;
+            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+
+            if (dctx->stage == ZSTDds_decompressLastBlock) {   /* end of frame */
+                DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
+                if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
+                    if (dctx->decodedSize != dctx->fParams.frameContentSize) {
+                        return ERROR(corruption_detected);
+                }   }
+                if (dctx->fParams.checksumFlag) {  /* another round for frame checksum */
+                    dctx->expected = 4;
+                    dctx->stage = ZSTDds_checkChecksum;
+                } else {
+                    dctx->expected = 0;   /* ends here */
+                    dctx->stage = ZSTDds_getFrameHeaderSize;
+                }
+            } else {
+                dctx->stage = ZSTDds_decodeBlockHeader;
+                dctx->expected = ZSTD_blockHeaderSize;
+                dctx->previousDstEnd = (char*)dst + rSize;
+            }
+            return rSize;
+        }
+
+    case ZSTDds_checkChecksum:
+        assert(srcSize == 4);  /* guaranteed by dctx->expected */
+        {   U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
+            U32 const check32 = MEM_readLE32(src);
+            DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
+            if (check32 != h32) return ERROR(checksum_wrong);
+            dctx->expected = 0;
+            dctx->stage = ZSTDds_getFrameHeaderSize;
+            return 0;
+        }
+
+    case ZSTDds_decodeSkippableHeader:
+        assert(src != NULL);
+        assert(srcSize <= ZSTD_skippableHeaderSize);
+        memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize);   /* complete skippable header */
+        dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize);   /* note : dctx->expected can grow seriously large, beyond local buffer size */
+        dctx->stage = ZSTDds_skipFrame;
+        return 0;
+
+    case ZSTDds_skipFrame:
+        dctx->expected = 0;
+        dctx->stage = ZSTDds_getFrameHeaderSize;
+        return 0;
+
+    default:
+        return ERROR(GENERIC);   /* impossible */
+    }
+}
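+
+/* Usage sketch : the buffer-less streaming protocol that drives
+ * ZSTD_decompressContinue(). Illustrative only (error handling elided,
+ * readFromSource() is a hypothetical input callback; real code must also
+ * keep successive outputs contiguous, see ZSTD_checkContinuity()) :
+ *
+ *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
+ *     ZSTD_decompressBegin(dctx);
+ *     for (;;) {
+ *         size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *         if (toRead == 0) break;                  // frame fully decoded
+ *         readFromSource(inBuff, toRead);          // must supply exactly toRead bytes
+ *         {   size_t const decoded = ZSTD_decompressContinue(dctx,
+ *                     outBuff, outCapacity, inBuff, toRead);
+ *             if (ZSTD_isError(decoded)) break;    // decoded may be 0 for headers
+ *     }   }
+ *     ZSTD_freeDCtx(dctx);
+ */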
+
+
+static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    dctx->dictEnd = dctx->previousDstEnd;
+    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+    dctx->base = dict;
+    dctx->previousDstEnd = (const char*)dict + dictSize;
+    return 0;
+}
+
+/* ZSTD_loadEntropy() :
+ * dict : must point at beginning of a valid zstd dictionary
+ * @return : size of entropy tables read */
+static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+
+    if (dictSize <= 8) return ERROR(dictionary_corrupted);
+    dictPtr += 8;   /* skip header = magic + dictID */
+
+    {   size_t const hSize = HUF_readDTableX4_wksp(
+            entropy->hufTable, dictPtr, dictEnd - dictPtr,
+            entropy->workspace, sizeof(entropy->workspace));
+        if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
+        dictPtr += hSize;
+    }
+
+    {   short offcodeNCount[MaxOff+1];
+        U32 offcodeMaxValue = MaxOff, offcodeLog;
+        size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        ZSTD_buildFSETable(entropy->OFTable,
+                            offcodeNCount, offcodeMaxValue,
+                            OF_base, OF_bits,
+                            offcodeLog);
+        dictPtr += offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        ZSTD_buildFSETable(entropy->MLTable,
+                            matchlengthNCount, matchlengthMaxValue,
+                            ML_base, ML_bits,
+                            matchlengthLog);
+        dictPtr += matchlengthHeaderSize;
+    }
+
+    {   short litlengthNCount[MaxLL+1];
+        unsigned litlengthMaxValue = MaxLL, litlengthLog;
+        size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
+        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        ZSTD_buildFSETable(entropy->LLTable,
+                            litlengthNCount, litlengthMaxValue,
+                            LL_base, LL_bits,
+                            litlengthLog);
+        dictPtr += litlengthHeaderSize;
+    }
+
+    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    {   int i;
+        size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
+        for (i=0; i<3; i++) {
+            U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
+            if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
+            entropy->rep[i] = rep;
+    }   }
+
+    return dictPtr - (const BYTE*)dict;
+}
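+
+/* For reference, the dictionary layout consumed above, as implied by the
+ * reads in ZSTD_loadEntropy() (entropy table sizes are variable) :
+ *
+ *   | magic 0xEC30A437 (4B) | dictID (4B) | HUF literals table |
+ *   | FSE offcode table | FSE matchlength table | FSE litlength table |
+ *   | 3 repcodes (3 x 4B) | dictionary content ... |
+ *
+ * Everything after the repcodes is raw content, referenced afterwards
+ * by ZSTD_refDictContent(). */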
+
+static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
+    {   U32 const magic = MEM_readLE32(dict);
+        if (magic != ZSTD_MAGIC_DICTIONARY) {
+            return ZSTD_refDictContent(dctx, dict, dictSize);   /* pure content mode */
+    }   }
+    dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+
+    /* load entropy tables */
+    {   size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
+        if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
+        dict = (const char*)dict + eSize;
+        dictSize -= eSize;
+    }
+    dctx->litEntropy = dctx->fseEntropy = 1;
+
+    /* reference dictionary content */
+    return ZSTD_refDictContent(dctx, dict, dictSize);
+}
+
+/* Note : this function cannot fail */
+size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
+{
+    assert(dctx != NULL);
+    dctx->expected = ZSTD_startingInputLength(dctx->format);  /* dctx->format must be properly set */
+    dctx->stage = ZSTDds_getFrameHeaderSize;
+    dctx->decodedSize = 0;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    dctx->vBase = NULL;
+    dctx->dictEnd = NULL;
+    dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
+    dctx->litEntropy = dctx->fseEntropy = 0;
+    dctx->dictID = 0;
+    ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
+    memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue));  /* initial repcodes */
+    dctx->LLTptr = dctx->entropy.LLTable;
+    dctx->MLTptr = dctx->entropy.MLTable;
+    dctx->OFTptr = dctx->entropy.OFTable;
+    dctx->HUFptr = dctx->entropy.hufTable;
+    return 0;
+}
+
+size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    CHECK_F( ZSTD_decompressBegin(dctx) );
+    if (dict && dictSize)
+        CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
+    return 0;
+}
+
+
+/* ======   ZSTD_DDict   ====== */
+
+struct ZSTD_DDict_s {
+    void* dictBuffer;
+    const void* dictContent;
+    size_t dictSize;
+    ZSTD_entropyDTables_t entropy;
+    U32 dictID;
+    U32 entropyPresent;
+    ZSTD_customMem cMem;
+};  /* typedef'd to ZSTD_DDict within "zstd.h" */
+
+static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
+{
+    return ddict->dictContent;
+}
+
+static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
+{
+    return ddict->dictSize;
+}
+
+size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
+{
+    CHECK_F( ZSTD_decompressBegin(dstDCtx) );
+    if (ddict) {   /* support begin on NULL */
+        dstDCtx->dictID = ddict->dictID;
+        dstDCtx->base = ddict->dictContent;
+        dstDCtx->vBase = ddict->dictContent;
+        dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
+        dstDCtx->previousDstEnd = dstDCtx->dictEnd;
+        if (ddict->entropyPresent) {
+            dstDCtx->litEntropy = 1;
+            dstDCtx->fseEntropy = 1;
+            dstDCtx->LLTptr = ddict->entropy.LLTable;
+            dstDCtx->MLTptr = ddict->entropy.MLTable;
+            dstDCtx->OFTptr = ddict->entropy.OFTable;
+            dstDCtx->HUFptr = ddict->entropy.hufTable;
+            dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
+            dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
+            dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
+        } else {
+            dstDCtx->litEntropy = 0;
+            dstDCtx->fseEntropy = 0;
+        }
+    }
+    return 0;
+}
+
+static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType)
+{
+    ddict->dictID = 0;
+    ddict->entropyPresent = 0;
+    if (dictContentType == ZSTD_dct_rawContent) return 0;
+
+    if (ddict->dictSize < 8) {
+        if (dictContentType == ZSTD_dct_fullDict)
+            return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
+        return 0;   /* pure content mode */
+    }
+    {   U32 const magic = MEM_readLE32(ddict->dictContent);
+        if (magic != ZSTD_MAGIC_DICTIONARY) {
+            if (dictContentType == ZSTD_dct_fullDict)
+                return ERROR(dictionary_corrupted);   /* only accept specified dictionaries */
+            return 0;   /* pure content mode */
+        }
+    }
+    ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize);
+
+    /* load entropy tables */
+    CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
+    ddict->entropyPresent = 1;
+    return 0;
+}
+
+
+static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
+                                      const void* dict, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType)
+{
+    if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
+        ddict->dictBuffer = NULL;
+        ddict->dictContent = dict;
+    } else {
+        void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
+        ddict->dictBuffer = internalBuffer;
+        ddict->dictContent = internalBuffer;
+        if (!internalBuffer) return ERROR(memory_allocation);
+        memcpy(internalBuffer, dict, dictSize);
+    }
+    ddict->dictSize = dictSize;
+    ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001);  /* cover both little and big endian */
+
+    /* parse dictionary content */
+    CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) );
+
+    return 0;
+}
+
+ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
+                                      ZSTD_dictLoadMethod_e dictLoadMethod,
+                                      ZSTD_dictContentType_e dictContentType,
+                                      ZSTD_customMem customMem)
+{
+    if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
+
+    {   ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
+        if (!ddict) return NULL;
+        ddict->cMem = customMem;
+
+        if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType) )) {
+            ZSTD_freeDDict(ddict);
+            return NULL;
+        }
+
+        return ddict;
+    }
+}
+
+/*! ZSTD_createDDict() :
+*   Create a digested dictionary, to start decompression without startup delay.
+*   `dict` content is copied inside DDict.
+*   Consequently, `dict` can be released after `ZSTD_DDict` creation */
+ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
+}
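+
+/* Usage sketch (illustrative; error handling elided, compressedBuf/cSize and
+ * dst/dstCapacity are assumed caller-provided) : create the DDict once and
+ * reuse it across many frames to amortize dictionary digestion :
+ *
+ *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictBufSize);
+ *     ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
+ *     size_t const dSize = ZSTD_decompress_usingDDict(dctx, dst, dstCapacity,
+ *                                                     compressedBuf, cSize, ddict);
+ *     ZSTD_freeDCtx(dctx);
+ *     ZSTD_freeDDict(ddict);
+ */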
+
+/*! ZSTD_createDDict_byReference() :
+ *  Create a digested dictionary, to start decompression without startup delay.
+ *  Dictionary content is simply referenced, it will be accessed during decompression.
+ *  Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
+ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
+{
+    ZSTD_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
+}
+
+
+const ZSTD_DDict* ZSTD_initStaticDDict(
+                                void* workspace, size_t workspaceSize,
+                                const void* dict, size_t dictSize,
+                                ZSTD_dictLoadMethod_e dictLoadMethod,
+                                ZSTD_dictContentType_e dictContentType)
+{
+    size_t const neededSpace =
+            sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+    ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace;
+    assert(workspace != NULL);
+    assert(dict != NULL);
+    if ((size_t)workspace & 7) return NULL;  /* 8-aligned */
+    if (workspaceSize < neededSpace) return NULL;
+    if (dictLoadMethod == ZSTD_dlm_byCopy) {
+        memcpy(ddict+1, dict, dictSize);  /* local copy */
+        dict = ddict+1;
+    }
+    if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) ))
+        return NULL;
+    return ddict;
+}
+
+
+size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support free on NULL */
+    {   ZSTD_customMem const cMem = ddict->cMem;
+        ZSTD_free(ddict->dictBuffer, cMem);
+        ZSTD_free(ddict, cMem);
+        return 0;
+    }
+}
+
+/*! ZSTD_estimateDDictSize() :
+ *  Estimate amount of memory that will be needed to create a dictionary for decompression.
+ *  Note : dictionaries created by reference (using ZSTD_dlm_byRef) are smaller */
+size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
+{
+    return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
+}
+
+size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;   /* support sizeof on NULL */
+    return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
+}
+
+/*! ZSTD_getDictID_fromDict() :
+ *  Provides the dictID stored within dictionary.
+ *  If @return == 0, the dictionary is not conformant to the Zstandard specification.
+ *  It can still be loaded, but as a content-only dictionary. */
+unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
+{
+    if (dictSize < 8) return 0;
+    if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
+    return MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
+}
+
+/*! ZSTD_getDictID_fromDDict() :
+ *  Provides the dictID of the dictionary loaded into `ddict`.
+ *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or empty.
+ *  Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
+unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
+{
+    if (ddict==NULL) return 0;
+    return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
+}
+
+/*! ZSTD_getDictID_fromFrame() :
+ *  Provides the dictID required to decompress the frame stored within `src`.
+ *  If @return == 0, the dictID could not be decoded.
+ *  This could be for one of the following reasons :
+ *  - The frame does not require a dictionary (most common case).
+ *  - The frame was built with the dictID intentionally removed.
+ *    The needed dictionary is then hidden information.
+ *    Note : this use case also happens when using a non-conformant dictionary.
+ *  - `srcSize` is too small, and as a result, the frame header could not be decoded.
+ *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
+ *  - This is not a Zstandard frame.
+ *  To identify the exact failure cause, use ZSTD_getFrameHeader(),
+ *  which will provide a more precise error code. */
+unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
+{
+    ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
+    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
+    if (ZSTD_isError(hError)) return 0;
+    return zfp.dictID;
+}
+
+
+/*! ZSTD_decompress_usingDDict() :
+*   Decompression using a pre-digested dictionary.
+*   Uses the dictionary without significant overhead. */
+size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
+                                  void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize,
+                            const ZSTD_DDict* ddict)
+{
+    /* pass content and size in case legacy frames are encountered */
+    return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
+                                     NULL, 0,
+                                     ddict);
+}
+
+
+/*=====================================
+*   Streaming decompression
+*====================================*/
+
+ZSTD_DStream* ZSTD_createDStream(void)
+{
+    DEBUGLOG(3, "ZSTD_createDStream");
+    return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
+}
+
+ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
+{
+    return ZSTD_initStaticDCtx(workspace, workspaceSize);
+}
+
+ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
+{
+    return ZSTD_createDCtx_advanced(customMem);
+}
+
+size_t ZSTD_freeDStream(ZSTD_DStream* zds)
+{
+    return ZSTD_freeDCtx(zds);
+}
+
+
+/* *** Initialization *** */
+
+size_t ZSTD_DStreamInSize(void)  { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
+size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
+
+size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
+{
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    ZSTD_freeDDict(dctx->ddictLocal);
+    if (dict && dictSize >= 8) {
+        dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
+        if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
+    } else {
+        dctx->ddictLocal = NULL;
+    }
+    dctx->ddict = dctx->ddictLocal;
+    return 0;
+}
+
+size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
+}
+
+size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
+{
+    return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
+}
+
+size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
+{
+    return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
+}
+
+
+/* ZSTD_initDStream_usingDict() :
+ * return : expected size, aka ZSTD_frameHeaderSize_prefix.
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
+{
+    DEBUGLOG(4, "ZSTD_initDStream_usingDict");
+    zds->streamStage = zdss_init;
+    CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
+    return ZSTD_frameHeaderSize_prefix;
+}
+
+/* note : this variant can't fail */
+size_t ZSTD_initDStream(ZSTD_DStream* zds)
+{
+    DEBUGLOG(4, "ZSTD_initDStream");
+    return ZSTD_initDStream_usingDict(zds, NULL, 0);
+}
+
+size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
+{
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    dctx->ddict = ddict;
+    return 0;
+}
+
+/* ZSTD_initDStream_usingDDict() :
+ * ddict is only referenced, and must outlive the decompression session.
+ * this function cannot fail */
+size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
+{
+    size_t const initResult = ZSTD_initDStream(dctx);
+    dctx->ddict = ddict;
+    return initResult;
+}
+
+/* ZSTD_resetDStream() :
+ * return : expected size, aka ZSTD_frameHeaderSize_prefix.
+ * this function cannot fail */
+size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
+{
+    DEBUGLOG(4, "ZSTD_resetDStream");
+    dctx->streamStage = zdss_loadHeader;
+    dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
+    dctx->legacyVersion = 0;
+    dctx->hostageByte = 0;
+    return ZSTD_frameHeaderSize_prefix;
+}
+
+size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
+                                ZSTD_DStreamParameter_e paramType, unsigned paramValue)
+{
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    switch(paramType)
+    {
+        default : return ERROR(parameter_unsupported);
+        case DStream_p_maxWindowSize :
+            DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
+            dctx->maxWindowSize = paramValue ? paramValue : (U32)(-1);
+            break;
+    }
+    return 0;
+}
+
+size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
+{
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    dctx->maxWindowSize = maxWindowSize;
+    return 0;
+}
+
+size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
+{
+    DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
+    if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
+    dctx->format = format;
+    return 0;
+}
+
+
+size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
+{
+    return ZSTD_sizeof_DCtx(dctx);
+}
+
+size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
+{
+    size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+    unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
+    unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
+    size_t const minRBSize = (size_t) neededSize;
+    if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
+    return minRBSize;
+}
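+
+/* Worked example (assuming WILDCOPY_OVERLENGTH == 8, as in this version) :
+ * for windowSize = 1 MB and an unknown content size, blockSize = 128 KB
+ * (ZSTD_BLOCKSIZE_MAX), so the minimal rolling buffer is
+ * 1048576 + 131072 + 2*8 = 1179664 bytes. A known, smaller frameContentSize
+ * caps the result at frameContentSize instead. */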
+
+size_t ZSTD_estimateDStreamSize(size_t windowSize)
+{
+    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
+    size_t const inBuffSize = blockSize;  /* no block can be larger */
+    size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
+    return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
+}
+
+size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
+{
+    U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX;   /* note : should be user-selectable */
+    ZSTD_frameHeader zfh;
+    size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
+    if (ZSTD_isError(err)) return err;
+    if (err>0) return ERROR(srcSize_wrong);
+    if (zfh.windowSize > windowSizeMax)
+        return ERROR(frameParameter_windowTooLarge);
+    return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
+}
+
+
+/* *****   Decompression   ***** */
+
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t const length = MIN(dstCapacity, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+
+size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    const char* const istart = (const char*)(input->src) + input->pos;
+    const char* const iend = (const char*)(input->src) + input->size;
+    const char* ip = istart;
+    char* const ostart = (char*)(output->dst) + output->pos;
+    char* const oend = (char*)(output->dst) + output->size;
+    char* op = ostart;
+    U32 someMoreWork = 1;
+
+    DEBUGLOG(5, "ZSTD_decompressStream");
+    if (input->pos > input->size) {  /* forbidden */
+        DEBUGLOG(5, "in: pos: %u   vs size: %u",
+                    (U32)input->pos, (U32)input->size);
+        return ERROR(srcSize_wrong);
+    }
+    if (output->pos > output->size) {  /* forbidden */
+        DEBUGLOG(5, "out: pos: %u   vs size: %u",
+                    (U32)output->pos, (U32)output->size);
+        return ERROR(dstSize_tooSmall);
+    }
+    DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
+
+    while (someMoreWork) {
+        switch(zds->streamStage)
+        {
+        case zdss_init :
+            DEBUGLOG(5, "stage zdss_init => transparent reset ");
+            ZSTD_resetDStream(zds);   /* transparent reset on starting decoding a new frame */
+            /* fall-through */
+
+        case zdss_loadHeader :
+            DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+            if (zds->legacyVersion) {
+                /* legacy support is incompatible with static dctx */
+                if (zds->staticSize) return ERROR(memory_allocation);
+                {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
+                    if (hint==0) zds->streamStage = zdss_init;
+                    return hint;
+            }   }
+#endif
+            {   size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
+                DEBUGLOG(5, "header size : %u", (U32)hSize);
+                if (ZSTD_isError(hSize)) {
+#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
+                    U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
+                    if (legacyVersion) {
+                        const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
+                        size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
+                        DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
+                        /* legacy support is incompatible with static dctx */
+                        if (zds->staticSize) return ERROR(memory_allocation);
+                        CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
+                                    zds->previousLegacyVersion, legacyVersion,
+                                    dict, dictSize));
+                        zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
+                        {   size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
+                            if (hint==0) zds->streamStage = zdss_init;   /* or stay in stage zdss_loadHeader */
+                            return hint;
+                    }   }
+#endif
+                    return hSize;   /* error */
+                }
+                if (hSize != 0) {   /* need more input */
+                    size_t const toLoad = hSize - zds->lhSize;   /* if hSize!=0, hSize > zds->lhSize */
+                    size_t const remainingInput = (size_t)(iend-ip);
+                    assert(iend >= ip);
+                    if (toLoad > remainingInput) {   /* not enough input to load full header */
+                        if (remainingInput > 0) {
+                            memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
+                            zds->lhSize += remainingInput;
+                        }
+                        input->pos = input->size;
+                        return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize;   /* remaining header bytes + next block header */
+                    }
+                    assert(ip != NULL);
+                    memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
+                    break;
+            }   }
+
+            /* check for single-pass mode opportunity */
+            if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
+                && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
+                size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
+                if (cSize <= (size_t)(iend-istart)) {
+                    /* shortcut : using single-pass mode */
+                    size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
+                    if (ZSTD_isError(decompressedSize)) return decompressedSize;
+                    DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
+                    ip = istart + cSize;
+                    op += decompressedSize;
+                    zds->expected = 0;
+                    zds->streamStage = zdss_init;
+                    someMoreWork = 0;
+                    break;
+            }   }
+
+            /* Consume header (see ZSTDds_decodeFrameHeader) */
+            DEBUGLOG(4, "Consume header");
+            CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
+
+            if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {  /* skippable frame */
+                zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize);
+                zds->stage = ZSTDds_skipFrame;
+            } else {
+                CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
+                zds->expected = ZSTD_blockHeaderSize;
+                zds->stage = ZSTDds_decodeBlockHeader;
+            }
+
+            /* control buffer memory usage */
+            DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
+                        (U32)(zds->fParams.windowSize >>10),
+                        (U32)(zds->maxWindowSize >> 10) );
+            zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
+            if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
+
+            /* Adapt buffer sizes to frame header instructions */
+            {   size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
+                size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
+                if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
+                    size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
+                    DEBUGLOG(4, "inBuff  : from %u to %u",
+                                (U32)zds->inBuffSize, (U32)neededInBuffSize);
+                    DEBUGLOG(4, "outBuff : from %u to %u",
+                                (U32)zds->outBuffSize, (U32)neededOutBuffSize);
+                    if (zds->staticSize) {  /* static DCtx */
+                        DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
+                        assert(zds->staticSize >= sizeof(ZSTD_DCtx));  /* controlled at init */
+                        if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
+                            return ERROR(memory_allocation);
+                    } else {
+                        ZSTD_free(zds->inBuff, zds->customMem);
+                        zds->inBuffSize = 0;
+                        zds->outBuffSize = 0;
+                        zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
+                        if (zds->inBuff == NULL) return ERROR(memory_allocation);
+                    }
+                    zds->inBuffSize = neededInBuffSize;
+                    zds->outBuff = zds->inBuff + zds->inBuffSize;
+                    zds->outBuffSize = neededOutBuffSize;
+            }   }
+            zds->streamStage = zdss_read;
+            /* fall-through */
+
+        case zdss_read:
+            DEBUGLOG(5, "stage zdss_read");
+            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+                DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
+                if (neededInSize==0) {  /* end of frame */
+                    zds->streamStage = zdss_init;
+                    someMoreWork = 0;
+                    break;
+                }
+                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
+                    int const isSkipFrame = ZSTD_isSkipFrame(zds);
+                    size_t const decodedSize = ZSTD_decompressContinue(zds,
+                        zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
+                        ip, neededInSize);
+                    if (ZSTD_isError(decodedSize)) return decodedSize;
+                    ip += neededInSize;
+                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */
+                    zds->outEnd = zds->outStart + decodedSize;
+                    zds->streamStage = zdss_flush;
+                    break;
+            }   }
+            if (ip==iend) { someMoreWork = 0; break; }   /* no more input */
+            zds->streamStage = zdss_load;
+            /* fall-through */
+
+        case zdss_load:
+            {   size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
+                size_t const toLoad = neededInSize - zds->inPos;
+                int const isSkipFrame = ZSTD_isSkipFrame(zds);
+                size_t loadedSize;
+                if (isSkipFrame) {
+                    loadedSize = MIN(toLoad, (size_t)(iend-ip));
+                } else {
+                    if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected);   /* should never happen */
+                    loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
+                }
+                ip += loadedSize;
+                zds->inPos += loadedSize;
+                if (loadedSize < toLoad) { someMoreWork = 0; break; }   /* not enough input, wait for more */
+
+                /* decode loaded input */
+                {   size_t const decodedSize = ZSTD_decompressContinue(zds,
+                        zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
+                        zds->inBuff, neededInSize);
+                    if (ZSTD_isError(decodedSize)) return decodedSize;
+                    zds->inPos = 0;   /* input is consumed */
+                    if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; }   /* this was just a header */
+                    zds->outEnd = zds->outStart +  decodedSize;
+            }   }
+            zds->streamStage = zdss_flush;
+            /* fall-through */
+
+        case zdss_flush:
+            {   size_t const toFlushSize = zds->outEnd - zds->outStart;
+                size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
+                op += flushedSize;
+                zds->outStart += flushedSize;
+                if (flushedSize == toFlushSize) {  /* flush completed */
+                    zds->streamStage = zdss_read;
+                    if ( (zds->outBuffSize < zds->fParams.frameContentSize)
+                      && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
+                        DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
+                                (int)(zds->outBuffSize - zds->outStart),
+                                (U32)zds->fParams.blockSizeMax);
+                        zds->outStart = zds->outEnd = 0;
+                    }
+                    break;
+            }   }
+            /* cannot complete flush */
+            someMoreWork = 0;
+            break;
+
+        default: return ERROR(GENERIC);   /* impossible */
+    }   }
+
+    /* result */
+    input->pos += (size_t)(ip-istart);
+    output->pos += (size_t)(op-ostart);
+    {   size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
+        if (!nextSrcSizeHint) {   /* frame fully decoded */
+            if (zds->outEnd == zds->outStart) {  /* output fully flushed */
+                if (zds->hostageByte) {
+                    if (input->pos >= input->size) {
+                        /* can't release hostage (not present) */
+                        zds->streamStage = zdss_read;
+                        return 1;
+                    }
+                    input->pos++;  /* release hostage */
+                }   /* zds->hostageByte */
+                return 0;
+            }  /* zds->outEnd == zds->outStart */
+            if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
+                input->pos--;   /* note : pos > 0, otherwise, impossible to finish reading last block */
+                zds->hostageByte=1;
+            }
+            return 1;
+        }  /* nextSrcSizeHint==0 */
+        nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block);   /* preload header of next block */
+        assert(zds->inPos <= nextSrcSizeHint);
+        nextSrcSizeHint -= zds->inPos;   /* part already loaded*/
+        return nextSrcSizeHint;
+    }
+}
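+
+/* Usage sketch of the streaming entry point above (illustrative; readInput()
+ * and flushOutput() are hypothetical callbacks, error handling elided) :
+ *
+ *     ZSTD_DStream* const zds = ZSTD_createDStream();
+ *     ZSTD_initDStream(zds);
+ *     ZSTD_inBuffer  in  = { srcBuf, 0, 0 };
+ *     ZSTD_outBuffer out = { dstBuf, dstBufCapacity, 0 };
+ *     for (;;) {
+ *         if (in.pos == in.size) {
+ *             in.size = readInput(srcBuf, srcBufCapacity);
+ *             in.pos = 0;
+ *         }
+ *         {   size_t const hint = ZSTD_decompressStream(zds, &out, &in);
+ *             if (ZSTD_isError(hint)) break;
+ *             flushOutput(dstBuf, out.pos);   // consume whatever was produced
+ *             out.pos = 0;
+ *             if (hint == 0) break;           // frame fully decoded and flushed
+ *     }   }
+ *     ZSTD_freeDStream(zds);
+ */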
+
+
+size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    return ZSTD_decompressStream(dctx, output, input);
+}
+
+size_t ZSTD_decompress_generic_simpleArgs (
+                            ZSTD_DCtx* dctx,
+                            void* dst, size_t dstCapacity, size_t* dstPos,
+                      const void* src, size_t srcSize, size_t* srcPos)
+{
+    ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
+    ZSTD_inBuffer  input  = { src, srcSize, *srcPos };
+    /* ZSTD_decompress_generic() will check validity of dstPos and srcPos */
+    size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
+    *dstPos = output.pos;
+    *srcPos = input.pos;
+    return cErr;
+}
+
+void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
+{
+    (void)ZSTD_initDStream(dctx);
+    dctx->format = ZSTD_f_zstd1;
+    dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.c b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
new file mode 100644
index 0000000..86e6b39
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_double_fast.h"
+
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+                              ZSTD_compressionParameters const* cParams,
+                              void const* end)
+{
+    U32* const hashLarge = ms->hashTable;
+    U32  const hBitsL = cParams->hashLog;
+    U32  const mls = cParams->searchLength;
+    U32* const hashSmall = ms->chainTable;
+    U32  const hBitsS = cParams->chainLog;
+    const BYTE* const base = ms->window.base;
+    const BYTE* ip = base + ms->nextToUpdate;
+    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+    const U32 fastHashFillStep = 3;
+
+    /* Always insert every fastHashFillStep position into the hash tables.
+     * Insert the other positions into the large hash table if their entry
+     * is empty.
+     */
+    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+        U32 const current = (U32)(ip - base);
+        U32 i;
+        for (i = 0; i < fastHashFillStep; ++i) {
+            size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls);
+            size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8);
+            if (i == 0)
+                hashSmall[smHash] = current + i;
+            if (i == 0 || hashLarge[lgHash] == 0)
+                hashLarge[lgHash] = current + i;
+        }
+    }
+}
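+
+/* Concretely : with fastHashFillStep == 3, positions 0,3,6,... are inserted
+ * into both tables unconditionally, while the in-between positions
+ * 1,2,4,5,... only populate the large hash table when their slot is
+ * still empty. */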
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_doubleFast_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
+        U32 const mls /* template */)
+{
+    U32* const hashLong = ms->hashTable;
+    const U32 hBitsL = cParams->hashLog;
+    U32* const hashSmall = ms->chainTable;
+    const U32 hBitsS = cParams->chainLog;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32 lowestIndex = ms->window.dictLimit;
+    const BYTE* const lowest = base + lowestIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=rep[0], offset_2=rep[1];
+    U32 offsetSaved = 0;
+
+    /* init */
+    ip += (ip==lowest);
+    {   U32 const maxRep = (U32)(ip-lowest);
+        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    }
+
+    /* Main Search Loop */
+    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+        size_t mLength;
+        size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+        size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+        U32 const current = (U32)(ip-base);
+        U32 const matchIndexL = hashLong[h2];
+        U32 const matchIndexS = hashSmall[h];
+        const BYTE* matchLong = base + matchIndexL;
+        const BYTE* match = base + matchIndexS;
+        hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
+
+        assert(offset_1 <= current);   /* supposed guaranteed by construction */
+        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+            /* favor repcode */
+            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            U32 offset;
+            if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+                offset = (U32)(ip-matchLong);
+                while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+            } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
+                size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+                U32 const matchIndexL3 = hashLong[hl3];
+                const BYTE* matchL3 = base + matchIndexL3;
+                hashLong[hl3] = current + 1;
+                if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
+                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+                    ip++;
+                    offset = (U32)(ip-matchL3);
+                    while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+                } else {
+                    mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+                    offset = (U32)(ip-match);
+                    while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+                }
+            } else {
+                ip += ((ip-anchor) >> kSearchStrength) + 1;
+                continue;
+            }
+
+            offset_2 = offset_1;
+            offset_1 = offset;
+
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }
+
+        /* match found */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
+                hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;  /* here because current+2 could be > iend-8 */
+            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
+                hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+
+            /* check immediate repcode */
+            while ( (ip <= ilimit)
+                 && ( (offset_2>0)
+                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+                hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+                hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                ip += rLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+    }   }   }
+
+    /* save reps for next block */
+    rep[0] = offset_1 ? offset_1 : offsetSaved;
+    rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_doubleFast(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    const U32 mls = cParams->searchLength;
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+    case 5 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+    case 6 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+    case 7 :
+        return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+    }
+}
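+
+/* Note on the dispatch above : `mls` is passed as a compile-time constant so
+ * that FORCE_INLINE_TEMPLATE stamps out one specialized copy of
+ * ZSTD_compressBlock_doubleFast_generic() per match length, letting the
+ * compiler fold the length-dependent comparisons. */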
+
+
+static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
+        U32 const mls /* template */)
+{
+    U32* const hashLong = ms->hashTable;
+    U32  const hBitsL = cParams->hashLog;
+    U32* const hashSmall = ms->chainTable;
+    U32  const hBitsS = cParams->chainLog;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   lowestIndex = ms->window.lowLimit;
+    const BYTE* const dictStart = dictBase + lowestIndex;
+    const U32   dictLimit = ms->window.dictLimit;
+    const BYTE* const lowPrefixPtr = base + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    U32 offset_1=rep[0], offset_2=rep[1];
+
+    /* Search Loop */
+    while (ip < ilimit) {  /* < instead of <=, because repcode check at (ip+1) */
+        const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+        const U32 matchIndex = hashSmall[hSmall];
+        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+        const BYTE* match = matchBase + matchIndex;
+
+        const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+        const U32 matchLongIndex = hashLong[hLong];
+        const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+        const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+        const U32 current = (U32)(ip-base);
+        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
+        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+        const BYTE* repMatch = repBase + repIndex;
+        size_t mLength;
+        hashSmall[hSmall] = hashLong[hLong] = current;   /* update hash table */
+
+        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+                const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
+                const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+                U32 offset;
+                mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+                offset = current - matchLongIndex;
+                while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+            } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+                U32 const matchIndex3 = hashLong[h3];
+                const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+                const BYTE* match3 = match3Base + matchIndex3;
+                U32 offset;
+                hashLong[h3] = current + 1;
+                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+                    const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
+                    const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
+                    mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+                    ip++;
+                    offset = current+1 - matchIndex3;
+                    while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+                } else {
+                    const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+                    const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+                    mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                    offset = current - matchIndex;
+                    while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
+                }
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+            } else {
+                ip += ((ip-anchor) >> kSearchStrength) + 1;
+                continue;
+        }   }
+
+        /* found a match : store it */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
+            hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
+            hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+            hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+            /* check immediate repcode */
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex2 = current2 - offset_2;
+                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
+                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+                    hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+                    ip += repLength2;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+    }   }   }
+
+    /* save reps for next block */
+    rep[0] = offset_1;
+    rep[1] = offset_2;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_doubleFast_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    U32 const mls = cParams->searchLength;
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 4);
+    case 5 :
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 5);
+    case 6 :
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 6);
+    case 7 :
+        return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 7);
+    }
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_double_fast.h b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
new file mode 100644
index 0000000..6d80b27
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_double_fast.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_DOUBLE_FAST_H
+#define ZSTD_DOUBLE_FAST_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "mem.h"      /* U32 */
+#include "zstd_compress_internal.h"     /* ZSTD_CCtx, size_t */
+
+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+                              ZSTD_compressionParameters const* cParams,
+                              void const* end);
+size_t ZSTD_compressBlock_doubleFast(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_doubleFast_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_DOUBLE_FAST_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_errors.h b/vendor/github.com/DataDog/zstd/zstd_errors.h
new file mode 100644
index 0000000..57533f2
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_errors.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_ERRORS_H_398273423
+#define ZSTD_ERRORS_H_398273423
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*===== dependency =====*/
+#include <stddef.h>   /* size_t */
+
+
+/* =====   ZSTDERRORLIB_API : control library symbols visibility   ===== */
+#ifndef ZSTDERRORLIB_VISIBILITY
+#  if defined(__GNUC__) && (__GNUC__ >= 4)
+#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
+#  else
+#    define ZSTDERRORLIB_VISIBILITY
+#  endif
+#endif
+#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
+#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
+#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required, but it allows the compiler to generate better code, saving a function pointer load from the IAT and an indirect jump. */
+#else
+#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
+#endif
+
+/*-*********************************************
+ *  Error codes list
+ *-*********************************************
+ *  Error code _values_ are pinned down only since v1.3.1.
+ *  Therefore, don't rely on values if you may link against any version < v1.3.1.
+ *
+ *  Only values < 100 are considered stable.
+ *
+ *  note 1 : this API shall be used with static linking only.
+ *           dynamic linking is not yet officially supported.
+ *  note 2 : prefer relying on the enum rather than on its value whenever possible;
+ *           this is the only supported way to use the error list with versions < v1.3.1.
+ *  note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/
+typedef enum {
+  ZSTD_error_no_error = 0,
+  ZSTD_error_GENERIC  = 1,
+  ZSTD_error_prefix_unknown                = 10,
+  ZSTD_error_version_unsupported           = 12,
+  ZSTD_error_frameParameter_unsupported    = 14,
+  ZSTD_error_frameParameter_windowTooLarge = 16,
+  ZSTD_error_corruption_detected = 20,
+  ZSTD_error_checksum_wrong      = 22,
+  ZSTD_error_dictionary_corrupted      = 30,
+  ZSTD_error_dictionary_wrong          = 32,
+  ZSTD_error_dictionaryCreation_failed = 34,
+  ZSTD_error_parameter_unsupported   = 40,
+  ZSTD_error_parameter_outOfBound    = 42,
+  ZSTD_error_tableLog_tooLarge       = 44,
+  ZSTD_error_maxSymbolValue_tooLarge = 46,
+  ZSTD_error_maxSymbolValue_tooSmall = 48,
+  ZSTD_error_stage_wrong       = 60,
+  ZSTD_error_init_missing      = 62,
+  ZSTD_error_memory_allocation = 64,
+  ZSTD_error_workSpace_tooSmall= 66,
+  ZSTD_error_dstSize_tooSmall = 70,
+  ZSTD_error_srcSize_wrong    = 72,
+  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
+  ZSTD_error_frameIndex_tooLarge = 100,
+  ZSTD_error_seekableIO          = 102,
+  ZSTD_error_maxCode = 120  /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
+} ZSTD_ErrorCode;
+
+/*! ZSTD_getErrorCode() :
+    convert a `size_t` function result into a `ZSTD_ErrorCode` enum type,
+    which can be used to compare with enum list published above */
+ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult);
+ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code);   /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_ERRORS_H_398273423 */
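
Downstream callers consume the enum above via ZSTD_getErrorCode() and
ZSTD_getErrorString(). A minimal sketch of the intended pattern, assuming the
public zstd.h API is available alongside this header; the undersized buffer is
chosen deliberately to provoke an error:

    #include <stdio.h>
    #include "zstd.h"
    #include "zstd_errors.h"

    int demo(const void *src, size_t srcSize)
    {
        char dst[8];   /* far below ZSTD_compressBound(srcSize) */
        size_t const r = ZSTD_compress(dst, sizeof(dst), src, srcSize, 1);
        if (ZSTD_isError(r)) {   /* always correct, whatever the version */
            ZSTD_ErrorCode const code = ZSTD_getErrorCode(r);
            if (code == ZSTD_error_dstSize_tooSmall)
                fprintf(stderr, "too small: %s\n", ZSTD_getErrorString(code));
            return -1;
        }
        return 0;   /* r is the compressed size */
    }
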
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.c b/vendor/github.com/DataDog/zstd/zstd_fast.c
new file mode 100644
index 0000000..df4d28b
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_fast.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_fast.h"
+
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+                        ZSTD_compressionParameters const* cParams,
+                        void const* end)
+{
+    U32* const hashTable = ms->hashTable;
+    U32  const hBits = cParams->hashLog;
+    U32  const mls = cParams->searchLength;
+    const BYTE* const base = ms->window.base;
+    const BYTE* ip = base + ms->nextToUpdate;
+    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
+    const U32 fastHashFillStep = 3;
+
+    /* Always insert every fastHashFillStep position into the hash table.
+     * Insert the other positions if their hash entry is empty.
+     */
+    for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+        U32 const current = (U32)(ip - base);
+        U32 i;
+        for (i = 0; i < fastHashFillStep; ++i) {
+            size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
+            if (i == 0 || hashTable[hash] == 0)
+                hashTable[hash] = current + i;
+        }
+    }
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_fast_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize,
+        U32 const hlog, U32 const stepSize, U32 const mls)
+{
+    U32* const hashTable = ms->hashTable;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   lowestIndex = ms->window.dictLimit;
+    const BYTE* const lowest = base + lowestIndex;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - HASH_READ_SIZE;
+    U32 offset_1=rep[0], offset_2=rep[1];
+    U32 offsetSaved = 0;
+
+    /* init */
+    ip += (ip==lowest);
+    {   U32 const maxRep = (U32)(ip-lowest);
+        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+    }
+
+    /* Main Search Loop */
+    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
+        size_t mLength;
+        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
+        U32 const current = (U32)(ip-base);
+        U32 const matchIndex = hashTable[h];
+        const BYTE* match = base + matchIndex;
+        hashTable[h] = current;   /* update hash table */
+
+        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
+            mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            if ( (matchIndex <= lowestIndex)
+              || (MEM_read32(match) != MEM_read32(ip)) ) {
+                assert(stepSize >= 1);
+                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+                continue;
+            }
+            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+            {   U32 const offset = (U32)(ip-match);
+                while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }   }
+
+        /* match found */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
+            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+            /* check immediate repcode */
+            while ( (ip <= ilimit)
+                 && ( (offset_2>0)
+                 & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+                /* store sequence */
+                size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+                { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
+                hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+                ip += rLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+    }   }   }
+
+    /* save reps for next block */
+    rep[0] = offset_1 ? offset_1 : offsetSaved;
+    rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    U32 const hlog = cParams->hashLog;
+    U32 const mls = cParams->searchLength;
+    U32 const stepSize = cParams->targetLength;
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+    case 5 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+    case 6 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+    case 7 :
+        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+    }
+}
+
+
+static size_t ZSTD_compressBlock_fast_extDict_generic(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize,
+        U32 const hlog, U32 const stepSize, U32 const mls)
+{
+    U32* hashTable = ms->hashTable;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const U32   lowestIndex = ms->window.lowLimit;
+    const BYTE* const dictStart = dictBase + lowestIndex;
+    const U32   dictLimit = ms->window.dictLimit;
+    const BYTE* const lowPrefixPtr = base + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    U32 offset_1=rep[0], offset_2=rep[1];
+
+    /* Search Loop */
+    while (ip < ilimit) {  /* < instead of <=, because the repcode check is at (ip+1) */
+        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
+        const U32 matchIndex = hashTable[h];
+        const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+        const BYTE* match = matchBase + matchIndex;
+        const U32 current = (U32)(ip-base);
+        const U32 repIndex = current + 1 - offset_1;   /* offset_1 expected <= current +1 */
+        const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+        const BYTE* repMatch = repBase + repIndex;
+        size_t mLength;
+        hashTable[h] = current;   /* update hash table */
+
+        if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+            const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+            ip++;
+            ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
+        } else {
+            if ( (matchIndex < lowestIndex) ||
+                 (MEM_read32(match) != MEM_read32(ip)) ) {
+                assert(stepSize >= 1);
+                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
+                continue;
+            }
+            {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+                const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+                U32 offset;
+                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
+                offset = current - matchIndex;
+                offset_2 = offset_1;
+                offset_1 = offset;
+                ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+        }   }
+
+        /* found a match : store it */
+        ip += mLength;
+        anchor = ip;
+
+        if (ip <= ilimit) {
+            /* Fill Table */
+            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
+            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
+            /* check immediate repcode */
+            while (ip <= ilimit) {
+                U32 const current2 = (U32)(ip-base);
+                U32 const repIndex2 = current2 - offset_2;
+                const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+                if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
+                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+                    const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
+                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
+                    ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+                    ip += repLength2;
+                    anchor = ip;
+                    continue;
+                }
+                break;
+    }   }   }
+
+    /* save reps for next block */
+    rep[0] = offset_1;
+    rep[1] = offset_2;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_fast_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    U32 const hlog = cParams->hashLog;
+    U32 const mls = cParams->searchLength;
+    U32 const stepSize = cParams->targetLength;
+    switch(mls)
+    {
+    default: /* includes case 3 */
+    case 4 :
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
+    case 5 :
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
+    case 6 :
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
+    case 7 :
+        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
+    }
+}
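
Both fast variants keep two rolling repeat-offsets, offset_1 and offset_2, and
swap them whenever a match is found through the second one, so the most
recently useful offset is always tried first; the pair survives across blocks
through rep[0]/rep[1]. A standalone sketch of that bookkeeping, with
illustrative types and names rather than zstd API:

    #include <stdint.h>

    typedef struct { uint32_t rep1, rep2; } repcodes_t;

    /* A fresh full-search match becomes rep1; the old rep1 shifts down. */
    void rep_push(repcodes_t *r, uint32_t offset)
    {
        r->rep2 = r->rep1;
        r->rep1 = offset;
    }

    /* A match found via rep2 promotes it: swap rep1 <=> rep2,
     * exactly like the tmpOffset swap in the repcode checks above. */
    void rep_promote_second(repcodes_t *r)
    {
        uint32_t const tmp = r->rep2;
        r->rep2 = r->rep1;
        r->rep1 = tmp;
    }
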
diff --git a/vendor/github.com/DataDog/zstd/zstd_fast.h b/vendor/github.com/DataDog/zstd/zstd_fast.h
new file mode 100644
index 0000000..f0438ad
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_fast.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_FAST_H
+#define ZSTD_FAST_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "mem.h"      /* U32 */
+#include "zstd_compress_internal.h"
+
+void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+                        ZSTD_compressionParameters const* cParams,
+                        void const* end);
+size_t ZSTD_compressBlock_fast(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_fast_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_FAST_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_internal.h b/vendor/github.com/DataDog/zstd/zstd_internal.h
new file mode 100644
index 0000000..65c08a8
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_internal.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+/* this module contains definitions which must be identical
+ * across compression, decompression and dictBuilder.
+ * It also contains a few functions useful to at least two of them,
+ * which benefit from being inlined */
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include "compiler.h"
+#include "mem.h"
+#include "error_private.h"
+#define ZSTD_STATIC_LINKING_ONLY
+#include "zstd.h"
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"
+#define HUF_STATIC_LINKING_ONLY
+#include "huf.h"
+#ifndef XXH_STATIC_LINKING_ONLY
+#  define XXH_STATIC_LINKING_ONLY  /* XXH64_state_t */
+#endif
+#include "xxhash.h"                /* XXH_reset, update, digest */
+
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*-*************************************
+*  Debug
+***************************************/
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
+#  include <assert.h>
+#else
+#  ifndef assert
+#    define assert(condition) ((void)0)
+#  endif
+#endif
+
+#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
+
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
+#  include <stdio.h>
+extern int g_debuglog_enable;
+/* recommended values for ZSTD_DEBUG display levels :
+ * 1 : no display, enables assert() only
+ * 2 : reserved for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (*very* verbose) */
+#  define RAWLOG(l, ...) {                                      \
+                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
+                    fprintf(stderr, __VA_ARGS__);               \
+            }   }
+#  define DEBUGLOG(l, ...) {                                    \
+                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
+                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
+                    fprintf(stderr, " \n");                     \
+            }   }
+#else
+#  define RAWLOG(l, ...)      {}    /* disabled */
+#  define DEBUGLOG(l, ...)    {}    /* disabled */
+#endif
+
+
+/*-*************************************
+*  shared macros
+***************************************/
+#undef MIN
+#undef MAX
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
+#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); }  /* check and send Error code */
+
+
+/*-*************************************
+*  Common constants
+***************************************/
+#define ZSTD_OPT_NUM    (1<<12)
+
+#define ZSTD_REP_NUM      3                 /* number of repcodes */
+#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
+static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
+#define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */
+static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTD_FRAMEIDSIZE 4
+static const size_t ZSTD_frameIdSize = ZSTD_FRAMEIDSIZE;  /* magic number size */
+
+#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
+static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
+typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
+
+#define HufLog 12
+typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+
+#define Litbits  8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML   52
+#define MaxLL   35
+#define DefaultMaxOff 28
+#define MaxOff  31
+#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog    9
+#define LLFSELog    9
+#define OffFSELog   8
+#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3,
+                                      4, 6, 7, 8, 9,10,11,12,
+                                     13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2,
+                                             2, 2, 2, 2, 2, 1, 1, 1,
+                                             2, 2, 2, 2, 2, 2, 2, 2,
+                                             2, 3, 2, 1, 1, 1, 1, 1,
+                                            -1,-1,-1,-1 };
+#define LL_DEFAULTNORMLOG 6  /* for static allocation */
+static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3,
+                                      4, 4, 5, 7, 8, 9,10,11,
+                                     12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2,
+                                             2, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1,-1,-1,
+                                            -1,-1,-1,-1,-1 };
+#define ML_DEFAULTNORMLOG 6  /* for static allocation */
+static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
+
+static const S16 OF_defaultNorm[DefaultMaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2,
+                                                     2, 1, 1, 1, 1, 1, 1, 1,
+                                                     1, 1, 1, 1, 1, 1, 1, 1,
+                                                    -1,-1,-1,-1,-1 };
+#define OF_DEFAULTNORMLOG 5  /* for static allocation */
+static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
+
+
+/*-*******************************************
+*  Shared functions to include for inlining
+*********************************************/
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy() :
+ *  custom version of memcpy(), can overwrite up to WILDCOPY_OVERLENGTH bytes (if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
+
+MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd)   /* should be faster for decoding, but strangely, not verified on all platforms */
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = (BYTE*)dstEnd;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
+
+
+/*-*******************************************
+*  Private declarations
+*********************************************/
+typedef struct seqDef_s {
+    U32 offset;
+    U16 litLength;
+    U16 matchLength;
+} seqDef;
+
+typedef struct {
+    seqDef* sequencesStart;
+    seqDef* sequences;
+    BYTE* litStart;
+    BYTE* lit;
+    BYTE* llCode;
+    BYTE* mlCode;
+    BYTE* ofCode;
+    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+    U32   longLengthPos;
+} seqStore_t;
+
+const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
+void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
+
+/* custom memory allocation functions */
+void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
+void* ZSTD_calloc(size_t size, ZSTD_customMem customMem);
+void ZSTD_free(void* ptr, ZSTD_customMem customMem);
+
+
+MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
+{
+    assert(val != 0);
+    {
+#   if defined(_MSC_VER)   /* Visual */
+        unsigned long r=0;
+        _BitScanReverse(&r, val);
+        return (unsigned)r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
+        return 31 - __builtin_clz(val);
+#   else   /* Software version */
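+        /* Software fallback : the shift cascade below smears the highest set
+         * bit into every lower position, turning val into 2^(n+1)-1 ; the
+         * De Bruijn multiplication then yields a distinct top-5-bit index for
+         * each of the 32 possible values, used to look up the answer. */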
+        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+        U32 v = val;
+        v |= v >> 1;
+        v |= v >> 2;
+        v |= v >> 4;
+        v |= v >> 8;
+        v |= v >> 16;
+        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
+#   endif
+    }
+}
+
+
+/* ZSTD_invalidateRepCodes() :
+ * ensures next compression will not use repcodes from previous block.
+ * Note : only works with regular variant;
+ *        do not use with extDict variant ! */
+void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */
+
+
+typedef struct {
+    blockType_e blockType;
+    U32 lastBlock;
+    U32 origSize;
+} blockProperties_t;
+
+/*! ZSTD_getcBlockSize() :
+ *  Provides the size of compressed block from block header `src` */
+/* Used by: decompress, fullbench (does not get its definition from here) */
+size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
+                          blockProperties_t* bpPtr);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* ZSTD_CCOMMON_H_MODULE */
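
ZSTD_wildcopy above trades safety for speed: the do/while always copies whole
8-byte chunks, so it may write up to WILDCOPY_OVERLENGTH bytes past dst+length,
and every caller must reserve that much slack. A standalone sketch of the
contract, mimicking the idiom rather than calling the internal function:

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    #define OVERLENGTH 8   /* mirrors WILDCOPY_OVERLENGTH */

    /* Rounds the copy up to whole 8-byte chunks, like COPY8 in a do/while:
     * may write up to OVERLENGTH bytes beyond dst+length. */
    static void wildcopy(void *dst, const void *src, ptrdiff_t length)
    {
        char *op = (char *)dst;
        const char *ip = (const char *)src;
        char *const oend = op + length;
        do { memcpy(op, ip, 8); op += 8; ip += 8; } while (op < oend);
    }

    int main(void)
    {
        size_t const payload = 13;
        /* caller's obligation: slack keeps the 8-byte overwrite in bounds */
        char *dst = malloc(payload + OVERLENGTH);
        char src[13 + OVERLENGTH] = "hello, wildcopy";
        if (dst) { wildcopy(dst, src, (ptrdiff_t)payload); free(dst); }
        return 0;
    }
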
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.c b/vendor/github.com/DataDog/zstd/zstd_lazy.c
new file mode 100644
index 0000000..9f15812
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_lazy.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_lazy.h"
+
+
+/*-*************************************
+*  Binary Tree search
+***************************************/
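+
+/* The DUBT scheme defers tree maintenance : ZSTD_updateDUBT merely links each
+ * new position at the head of a plain chain, tagged ZSTD_DUBT_UNSORTED_MARK.
+ * Only when ZSTD_DUBT_findBestMatch visits a hash bucket are the stacked
+ * unsorted candidates batch-sorted into the binary tree (ZSTD_insertDUBT1),
+ * so sorting cost is paid only for positions which are actually searched. */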
+
+void ZSTD_updateDUBT(
+                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                const BYTE* ip, const BYTE* iend,
+                U32 mls)
+{
+    U32* const hashTable = ms->hashTable;
+    U32  const hashLog = cParams->hashLog;
+
+    U32* const bt = ms->chainTable;
+    U32  const btLog  = cParams->chainLog - 1;
+    U32  const btMask = (1 << btLog) - 1;
+
+    const BYTE* const base = ms->window.base;
+    U32 const target = (U32)(ip - base);
+    U32 idx = ms->nextToUpdate;
+
+    if (idx != target)
+        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+                    idx, target, ms->window.dictLimit);
+    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
+    (void)iend;
+
+    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
+    for ( ; idx < target ; idx++) {
+        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
+        U32    const matchIndex = hashTable[h];
+
+        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
+        U32*   const sortMarkPtr  = nextCandidatePtr + 1;
+
+        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+        hashTable[h] = idx;   /* Update Hash Table */
+        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
+        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+    }
+    ms->nextToUpdate = target;
+}
+
+
+/** ZSTD_insertDUBT1() :
+ *  sort one already inserted but unsorted position
+ *  assumption : current >= btLow == (current - btMask)
+ *  doesn't fail */
+static void ZSTD_insertDUBT1(
+                 ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                 U32 current, const BYTE* inputEnd,
+                 U32 nbCompares, U32 btLow, int extDict)
+{
+    U32*   const bt = ms->chainTable;
+    U32    const btLog  = cParams->chainLog - 1;
+    U32    const btMask = (1 << btLog) - 1;
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const U32 dictLimit = ms->window.dictLimit;
+    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
+    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* match;
+    U32* smallerPtr = bt + 2*(current&btMask);
+    U32* largerPtr  = smallerPtr + 1;
+    U32 matchIndex = *smallerPtr;
+    U32 dummy32;   /* to be nullified at the end */
+    U32 const windowLow = ms->window.lowLimit;
+
+    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+                current, dictLimit, windowLow);
+    assert(current >= btLow);
+    assert(ip < iend);   /* condition for ZSTD_count */
+
+    while (nbCompares-- && (matchIndex > windowLow)) {
+        U32* const nextPtr = bt + 2*(matchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        assert(matchIndex < current);
+
+        if ( (!extDict)
+          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
+          || (current < dictLimit) /* both in extDict */) {
+            const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
+            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
+                 || (current < dictLimit) );
+            match = mBase + matchIndex;
+            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+        } else {
+            match = dictBase + matchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+            if (matchIndex+matchLength >= dictLimit)
+                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+        }
+
+        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+                    current, matchIndex, (U32)matchLength);
+
+        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
+            break;   /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+        }
+
+        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
+            /* match is smaller than current */
+            *smallerPtr = matchIndex;             /* update smaller idx */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
+            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+                        matchIndex, btLow, nextPtr[1]);
+            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
+            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
+        } else {
+            /* match is larger than current */
+            *largerPtr = matchIndex;
+            commonLengthLarger = matchLength;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
+            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+                        matchIndex, btLow, nextPtr[0]);
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+    }   }
+
+    *smallerPtr = *largerPtr = 0;
+}
+
+
+static size_t ZSTD_DUBT_findBestMatch (
+                            ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                            const BYTE* const ip, const BYTE* const iend,
+                            size_t* offsetPtr,
+                            U32 const mls,
+                            U32 const extDict)
+{
+    U32*   const hashTable = ms->hashTable;
+    U32    const hashLog = cParams->hashLog;
+    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32          matchIndex  = hashTable[h];
+
+    const BYTE* const base = ms->window.base;
+    U32    const current = (U32)(ip-base);
+    U32    const windowLow = ms->window.lowLimit;
+
+    U32*   const bt = ms->chainTable;
+    U32    const btLog  = cParams->chainLog - 1;
+    U32    const btMask = (1 << btLog) - 1;
+    U32    const btLow = (btMask >= current) ? 0 : current - btMask;
+    U32    const unsortLimit = MAX(btLow, windowLow);
+
+    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
+    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+    U32          nbCompares = 1U << cParams->searchLog;
+    U32          nbCandidates = nbCompares;
+    U32          previousCandidate = 0;
+
+    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
+    assert(ip <= iend-8);   /* required for h calculation */
+
+    /* reach end of unsorted candidates list */
+    while ( (matchIndex > unsortLimit)
+         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+         && (nbCandidates > 1) ) {
+        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+                    matchIndex);
+        *unsortedMark = previousCandidate;
+        previousCandidate = matchIndex;
+        matchIndex = *nextCandidate;
+        nextCandidate = bt + 2*(matchIndex&btMask);
+        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+        nbCandidates --;
+    }
+
+    if ( (matchIndex > unsortLimit)
+      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+                    matchIndex);
+        *nextCandidate = *unsortedMark = 0;   /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
+    }
+
+    /* batch sort stacked candidates */
+    matchIndex = previousCandidate;
+    while (matchIndex) {  /* will end on matchIndex == 0 */
+        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+        ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
+                         nbCandidates, unsortLimit, extDict);
+        matchIndex = nextCandidateIdx;
+        nbCandidates++;
+    }
+
+    /* find longest match */
+    {   size_t commonLengthSmaller=0, commonLengthLarger=0;
+        const BYTE* const dictBase = ms->window.dictBase;
+        const U32 dictLimit = ms->window.dictLimit;
+        const BYTE* const dictEnd = dictBase + dictLimit;
+        const BYTE* const prefixStart = base + dictLimit;
+        U32* smallerPtr = bt + 2*(current&btMask);
+        U32* largerPtr  = bt + 2*(current&btMask) + 1;
+        U32 matchEndIdx = current+8+1;
+        U32 dummy32;   /* to be nullified at the end */
+        size_t bestLength = 0;
+
+        matchIndex  = hashTable[h];
+        hashTable[h] = current;   /* Update Hash Table */
+
+        while (nbCompares-- && (matchIndex > windowLow)) {
+            U32* const nextPtr = bt + 2*(matchIndex & btMask);
+            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+            const BYTE* match;
+
+            if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+                match = base + matchIndex;
+                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+            } else {
+                match = dictBase + matchIndex;
+                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+                if (matchIndex+matchLength >= dictLimit)
+                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+            }
+
+            if (matchLength > bestLength) {
+                if (matchLength > matchEndIdx - matchIndex)
+                    matchEndIdx = matchIndex + (U32)matchLength;
+                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
+                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+                }
+            }
+
+            if (match[matchLength] < ip[matchLength]) {
+                /* match is smaller than current */
+                *smallerPtr = matchIndex;             /* update smaller idx */
+                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+            } else {
+                /* match is larger than current */
+                *largerPtr = matchIndex;
+                commonLengthLarger = matchLength;
+                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+                largerPtr = nextPtr;
+                matchIndex = nextPtr[0];
+        }   }
+
+        *smallerPtr = *largerPtr = 0;
+
+        assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
+        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
+        if (bestLength >= MINMATCH) {
+            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+        }
+        return bestLength;
+    }
+}
+
+
+/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 mls /* template */)
+{
+    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
+    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
+}
+
+
+static size_t ZSTD_BtFindBestMatch_selectMLS (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
+{
+    switch(cParams->searchLength)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
+    case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
+    case 7 :
+    case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
+    }
+}
+
+
+/** Tree updater, providing best match */
+static size_t ZSTD_BtFindBestMatch_extDict (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 mls)
+{
+    DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
+    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
+    return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
+}
+
+
+static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
+{
+    switch(cParams->searchLength)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
+    case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
+    case 7 :
+    case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
+    }
+}
+
+
+
+/* *********************************
+*  Hash Chain
+***********************************/
+#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]
+
+/* Update chains up to ip (excluded)
+   Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndex_internal(
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, U32 const mls)
+{
+    U32* const hashTable  = ms->hashTable;
+    const U32 hashLog = cParams->hashLog;
+    U32* const chainTable = ms->chainTable;
+    const U32 chainMask = (1 << cParams->chainLog) - 1;
+    const BYTE* const base = ms->window.base;
+    const U32 target = (U32)(ip - base);
+    U32 idx = ms->nextToUpdate;
+
+    while(idx < target) { /* catch up */
+        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+        hashTable[h] = idx;
+        idx++;
+    }
+
+    ms->nextToUpdate = target;
+    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+}
+
+U32 ZSTD_insertAndFindFirstIndex(
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip)
+{
+    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
+}
+
+
+/* inlining is important to hardwire a hot branch (template emulation) */
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_HcFindBestMatch_generic (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* const ip, const BYTE* const iLimit,
+                        size_t* offsetPtr,
+                        const U32 mls, const U32 extDict)
+{
+    U32* const chainTable = ms->chainTable;
+    const U32 chainSize = (1 << cParams->chainLog);
+    const U32 chainMask = chainSize-1;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const U32 dictLimit = ms->window.dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const U32 lowLimit = ms->window.lowLimit;
+    const U32 current = (U32)(ip-base);
+    const U32 minChain = current > chainSize ? current - chainSize : 0;
+    U32 nbAttempts = 1U << cParams->searchLog;
+    size_t ml=4-1;
+
+    /* HC4 match finder */
+    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+
+    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+        size_t currentMl=0;
+        if ((!extDict) || matchIndex >= dictLimit) {
+            const BYTE* const match = base + matchIndex;
+            if (match[ml] == ip[ml])   /* potentially better */
+                currentMl = ZSTD_count(ip, match, iLimit);
+        } else {
+            const BYTE* const match = dictBase + matchIndex;
+            assert(match+4 <= dictEnd);
+            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+        }
+
+        /* save best solution */
+        if (currentMl > ml) {
+            ml = currentMl;
+            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
+            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+        }
+
+        if (matchIndex <= minChain) break;
+        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+    }
+
+    return ml;
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* offsetPtr)
+{
+    switch(cParams->searchLength)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
+    case 7 :
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
+    }
+}
+
+
+FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* const iLimit,
+                        size_t* const offsetPtr)
+{
+    switch(cParams->searchLength)
+    {
+    default : /* includes case 3 */
+    case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
+    case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
+    case 7 :
+    case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
+    }
+}
+
+
+/* *******************************
+*  Common parser - lazy strategy
+*********************************/
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_generic(
+                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
+                        U32 rep[ZSTD_REP_NUM],
+                        ZSTD_compressionParameters const* cParams,
+                        const void* src, size_t srcSize,
+                        const U32 searchMethod, const U32 depth)
+{
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ms->window.base + ms->window.dictLimit;
+
+    typedef size_t (*searchMax_f)(
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+    searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+
+    /* init */
+    ip += (ip==base);
+    ms->nextToUpdate3 = ms->nextToUpdate;
+    {   U32 const maxRep = (U32)(ip-base);
+        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+    }
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        size_t matchLength=0;
+        size_t offset=0;
+        const BYTE* start=ip+1;
+
+        /* check repCode */
+        if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
+            /* repcode : we take it */
+            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+            if (depth==0) goto _storeSequence;
+        }
+
+        /* first search (depth 0) */
+        {   size_t offsetFound = 99999999;
+            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+            if (ml2 > matchLength)
+                matchLength = ml2, start = ip, offset=offsetFound;
+        }
+
+        if (matchLength < 4) {
+            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
+            continue;
+        }
+
+        /* let's try to find a better solution */
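+        /* Candidates below are compared through an approximate gain :
+         * match length times a constant weight, minus ZSTD_highbit32(offset+1),
+         * i.e. roughly the nb of bits needed to encode the offset.
+         * Repcode candidates pay no offset cost (weight 3), while the +4 / +7
+         * terms in gain1 bias deeper searches toward keeping the match
+         * already found. */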
+        if (depth>=1)
+        while (ip<ilimit) {
+            ip ++;
+            if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+                int const gain2 = (int)(mlRep * 3);
+                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                if ((mlRep >= 4) && (gain2 > gain1))
+                    matchLength = mlRep, offset = 0, start = ip;
+            }
+            {   size_t offset2=99999999;
+                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+                if ((ml2 >= 4) && (gain2 > gain1)) {
+                    matchLength = ml2, offset = offset2, start = ip;
+                    continue;   /* search a better one */
+            }   }
+
+            /* let's find an even better one */
+            if ((depth==2) && (ip<ilimit)) {
+                ip ++;
+                if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
+                    int const gain2 = (int)(ml2 * 4);
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((ml2 >= 4) && (gain2 > gain1))
+                        matchLength = ml2, offset = 0, start = ip;
+                }
+                {   size_t offset2=99999999;
+                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+                    if ((ml2 >= 4) && (gain2 > gain1)) {
+                        matchLength = ml2, offset = offset2, start = ip;
+                        continue;
+            }   }   }
+            break;  /* nothing found : store previous solution */
+        }
+
+        /* NOTE:
+         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior:
+         * (-offset+ZSTD_REP_MOVE-1) is unsigned, so adding it to start
+         * overflows the pointer.
+         */
+        /* catch up */
+        if (offset) {
+            while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > base))
+                 && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
+                { start--; matchLength++; }
+            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+        }
+        /* store sequence */
+_storeSequence:
+        {   size_t const litLength = start - anchor;
+            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            anchor = ip = start + matchLength;
+        }
+
+        /* check immediate repcode */
+        while ( ((ip <= ilimit) & (offset_2>0))
+             && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
+            /* store sequence */
+            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+            offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+            ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+            ip += matchLength;
+            anchor = ip;
+            continue;   /* faster when present ... (?) */
+    }   }
+
+    /* Save reps for next block */
+    rep[0] = offset_1 ? offset_1 : savedOffset;
+    rep[1] = offset_2 ? offset_2 : savedOffset;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btlazy2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+}
+
+size_t ZSTD_compressBlock_lazy2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+}
+
+size_t ZSTD_compressBlock_lazy(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+}
+
+size_t ZSTD_compressBlock_greedy(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+}
+
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_lazy_extDict_generic(
+                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
+                        U32 rep[ZSTD_REP_NUM],
+                        ZSTD_compressionParameters const* cParams,
+                        const void* src, size_t srcSize,
+                        const U32 searchMethod, const U32 depth)
+{
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ms->window.base;
+    const U32 dictLimit = ms->window.dictLimit;
+    const U32 lowestIndex = ms->window.lowLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const BYTE* const dictEnd  = dictBase + dictLimit;
+    const BYTE* const dictStart  = dictBase + lowestIndex;
+
+    typedef size_t (*searchMax_f)(
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
+    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+    U32 offset_1 = rep[0], offset_2 = rep[1];
+
+    /* init */
+    ms->nextToUpdate3 = ms->nextToUpdate;
+    ip += (ip == prefixStart);
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        size_t matchLength=0;
+        size_t offset=0;
+        const BYTE* start=ip+1;
+        U32 current = (U32)(ip-base);
+
+        /* check repCode */
+        {   const U32 repIndex = (U32)(current+1 - offset_1);
+            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+            const BYTE* const repMatch = repBase + repIndex;
+            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
+            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+                /* repcode detected: we should take it */
+                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+                if (depth==0) goto _storeSequence;
+        }   }
+
+        /* first search (depth 0) */
+        {   size_t offsetFound = 99999999;
+            size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
+            if (ml2 > matchLength)
+                matchLength = ml2, start = ip, offset=offsetFound;
+        }
+
+        if (matchLength < 4) {
+            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
+            continue;
+        }
+
+        /* let's try to find a better solution */
+        if (depth>=1)
+        while (ip<ilimit) {
+            ip ++;
+            current++;
+            /* check repCode */
+            if (offset) {
+                const U32 repIndex = (U32)(current - offset_1);
+                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                const BYTE* const repMatch = repBase + repIndex;
+                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+                if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                    /* repcode detected */
+                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+                    int const gain2 = (int)(repLength * 3);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    if ((repLength >= 4) && (gain2 > gain1))
+                        matchLength = repLength, offset = 0, start = ip;
+            }   }
+
+            /* search match, depth 1 */
+            {   size_t offset2=99999999;
+                size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+                if ((ml2 >= 4) && (gain2 > gain1)) {
+                    matchLength = ml2, offset = offset2, start = ip;
+                    continue;   /* search a better one */
+            }   }
+
+            /* let's find an even better one */
+            if ((depth==2) && (ip<ilimit)) {
+                ip ++;
+                current++;
+                /* check repCode */
+                if (offset) {
+                    const U32 repIndex = (U32)(current - offset_1);
+                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+                    const BYTE* const repMatch = repBase + repIndex;
+                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                        /* repcode detected */
+                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+                        int const gain2 = (int)(repLength * 4);
+                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                        if ((repLength >= 4) && (gain2 > gain1))
+                            matchLength = repLength, offset = 0, start = ip;
+                }   }
+
+                /* search match, depth 2 */
+                {   size_t offset2=99999999;
+                    size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
+                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+                    if ((ml2 >= 4) && (gain2 > gain1)) {
+                        matchLength = ml2, offset = offset2, start = ip;
+                        continue;
+            }   }   }
+            break;  /* nothing found : store previous solution */
+        }
+
+        /* catch up */
+        if (offset) {
+            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
+            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+        }
+
+        /* store sequence */
+_storeSequence:
+        {   size_t const litLength = start - anchor;
+            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+            anchor = ip = start + matchLength;
+        }
+
+        /* check immediate repcode */
+        while (ip <= ilimit) {
+            const U32 repIndex = (U32)((ip-base) - offset_2);
+            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+            const BYTE* const repMatch = repBase + repIndex;
+            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
+            if (MEM_read32(ip) == MEM_read32(repMatch)) {
+                /* repcode detected: we should take it */
+                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
+                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
+                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
+                ip += matchLength;
+                anchor = ip;
+                continue;   /* faster when present ... (?) */
+            }
+            break;
+    }   }
+
+    /* Save reps for next block */
+    rep[0] = offset_1;
+    rep[1] = offset_2;
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_greedy_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
+}
+
+size_t ZSTD_compressBlock_btlazy2_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_lazy.h b/vendor/github.com/DataDog/zstd/zstd_lazy.h
new file mode 100644
index 0000000..bda064f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_lazy.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"
+
+U32 ZSTD_insertAndFindFirstIndex(
+        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+        const BYTE* ip);
+
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). pre-emptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+
+size_t ZSTD_compressBlock_btlazy2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LAZY_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.c b/vendor/github.com/DataDog/zstd/zstd_ldm.c
new file mode 100644
index 0000000..bffd8a3
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ldm.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#include "zstd_ldm.h"
+
+#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
+#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
+
+#define LDM_BUCKET_SIZE_LOG 3
+#define LDM_MIN_MATCH_LENGTH 64
+#define LDM_HASH_RLOG 7
+#define LDM_HASH_CHAR_OFFSET 10
+
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+                               ZSTD_compressionParameters const* cParams)
+{
+    U32 const windowLog = cParams->windowLog;
+    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
+    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
+    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
+    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
+    if (cParams->strategy >= ZSTD_btopt) {
+      /* Get out of the way of the optimal parser */
+      U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
+      assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
+      assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
+      params->minMatchLength = minMatch;
+    }
+    if (params->hashLog == 0) {
+        params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
+        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
+    }
+    if (params->hashEveryLog == 0) {
+        params->hashEveryLog =
+                windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
+    }
+    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
+}
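+
+/* Worked example (illustrative numbers, not taken from the source): with
+ * windowLog=27 and every LDM field left at 0, the defaults resolve to
+ *   bucketSizeLog  = 3                 (LDM_BUCKET_SIZE_LOG)
+ *   minMatchLength = 64                (LDM_MIN_MATCH_LENGTH)
+ *   hashLog        = MAX(6, 27-7) = 20 (assuming ZSTD_HASHLOG_MIN == 6)
+ *   hashEveryLog   = 27 - 20      = 7
+ * so roughly 1 position in 2^7 passes the tag check and gets inserted. */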
+
+size_t ZSTD_ldm_getTableSize(ldmParams_t params)
+{
+    size_t const ldmHSize = ((size_t)1) << params.hashLog;
+    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
+    size_t const ldmBucketSize =
+        ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
+    size_t const totalSize = ldmBucketSize + ldmHSize * sizeof(ldmEntry_t);
+    return params.enableLdm ? totalSize : 0;
+}
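+
+/* Size sketch for the example parameters above (hashLog=20, bucketSizeLog=3):
+ *   ldmHSize      = 1<<20 ldmEntry_t entries
+ *   ldmBucketSize = 1<<(20-3) bytes of per-bucket insert offsets
+ *   totalSize     = (1<<17) + (1<<20)*sizeof(ldmEntry_t)
+ * which is about 8.1 MiB if ldmEntry_t is two U32s (offset + checksum). */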
+
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
+{
+    return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
+}
+
+/** ZSTD_ldm_getSmallHash() :
+ *  numBits should be <= 32
+ *  If numBits==0, returns 0.
+ *  @return : the most significant numBits of value. */
+static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
+{
+    assert(numBits <= 32);
+    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
+}
+
+/** ZSTD_ldm_getChecksum() :
+ *  numBitsToDiscard should be <= 32
+ *  @return : the next most significant 32 bits after numBitsToDiscard */
+static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
+{
+    assert(numBitsToDiscard <= 32);
+    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
+}
+
+/** ZSTD_ldm_getTag() :
+ *  Given the hash, returns the most significant numTagBits bits
+ *  after (32 + hbits) bits.
+ *
+ *  If there are not enough bits remaining, return the last
+ *  numTagBits bits. */
+static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
+{
+    assert(numTagBits < 32 && hbits <= 32);
+    if (32 - hbits < numTagBits) {
+        return hash & (((U32)1 << numTagBits) - 1);
+    } else {
+        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
+    }
+}
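+
+/* The three helpers above carve up one 64-bit rolling hash, most significant
+ * bits first (shown for the common case hbits + 32 + numTagBits <= 64):
+ *
+ *   bit 63                                                      bit 0
+ *   | small hash (hbits) | checksum (32) | tag (numTagBits) | unused |
+ *
+ * The small hash selects the bucket, the checksum cheaply filters false
+ * bucket hits, and the tag subsamples which positions are inserted. */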
+
+/** ZSTD_ldm_getBucket() :
+ *  Returns a pointer to the start of the bucket associated with hash. */
+static ldmEntry_t* ZSTD_ldm_getBucket(
+        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
+{
+    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
+}
+
+/** ZSTD_ldm_insertEntry() :
+ *  Insert the entry with corresponding hash into the hash table */
+static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
+                                 size_t const hash, const ldmEntry_t entry,
+                                 ldmParams_t const ldmParams)
+{
+    BYTE* const bucketOffsets = ldmState->bucketOffsets;
+    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
+    bucketOffsets[hash]++;
+    bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
+}
+
+/** ZSTD_ldm_makeEntryAndInsertByTag() :
+ *
+ *  Gets the small hash, checksum, and tag from the rollingHash.
+ *
+ *  If the tag matches (1 << ldmParams.hashEveryLog)-1, then
+ *  creates an ldmEntry from the offset, and inserts it into the hash table.
+ *
+ *  hBits is the length of the small hash, which is the most significant hBits
+ *  of rollingHash. The checksum is the next 32 most significant bits, followed
+ *  by ldmParams.hashEveryLog bits that make up the tag. */
+static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
+                                             U64 const rollingHash,
+                                             U32 const hBits,
+                                             U32 const offset,
+                                             ldmParams_t const ldmParams)
+{
+    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
+    U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
+    if (tag == tagMask) {
+        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
+        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+        ldmEntry_t entry;
+        entry.offset = offset;
+        entry.checksum = checksum;
+        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
+    }
+}
+
+/** ZSTD_ldm_getRollingHash() :
+ *  Get a 64-bit hash using the first len bytes from buf.
+ *
+ *  Given bytes s = s_1, s_2, ... s_k, the hash is defined to be
+ *  H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
+ *
+ *  where the constant a is defined to be prime8bytes.
+ *
+ *  The implementation adds an offset to each byte, so
+ *  H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
+static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
+{
+    U64 ret = 0;
+    U32 i;
+    for (i = 0; i < len; i++) {
+        ret *= prime8bytes;
+        ret += buf[i] + LDM_HASH_CHAR_OFFSET;
+    }
+    return ret;
+}
+
+/** ZSTD_ldm_ipow() :
+ *  Return base^exp. */
+static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
+{
+    U64 ret = 1;
+    while (exp) {
+        if (exp & 1) { ret *= base; }
+        exp >>= 1;
+        base *= base;
+    }
+    return ret;
+}
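+
+/* Square-and-multiply walkthrough for ZSTD_ldm_ipow(a, 5), with 5 = 0b101:
+ *   bit 0 set   -> ret = a;            base becomes a^2
+ *   bit 1 clear ->                     base becomes a^4
+ *   bit 2 set   -> ret = a * a^4 = a^5
+ * O(log exp) multiplies; overflow is harmless since U64 arithmetic is
+ * implicitly mod 2^64, which is all the rolling hash needs. */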
+
+U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
+    DEBUGLOG(4, "ZSTD_ldm_getHashPower: mml=%u", minMatchLength);
+    assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
+    return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
+}
+
+/** ZSTD_ldm_updateHash() :
+ *  Updates hash by removing toRemove and adding toAdd. */
+static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
+{
+    hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
+    hash *= prime8bytes;
+    hash += toAdd + LDM_HASH_CHAR_OFFSET;
+    return hash;
+}
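+
+/* Why this update matches ZSTD_ldm_getRollingHash(): writing
+ * c_i = s_i + LDM_HASH_CHAR_OFFSET and k for the window length,
+ *   H(s_1..s_k)     = c_1*a^(k-1) + c_2*a^(k-2) + ... + c_k
+ *   H(s_2..s_(k+1)) = (H(s_1..s_k) - c_1*a^(k-1)) * a + c_(k+1)
+ * hashPower caches a^(k-1) (see ZSTD_ldm_getHashPower), so sliding the
+ * window by one byte costs two multiplies instead of rehashing k bytes. */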
+
+/** ZSTD_ldm_countBackwardsMatch() :
+ *  Returns the number of bytes that match backwards before pIn and pMatch.
+ *
+ *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
+static size_t ZSTD_ldm_countBackwardsMatch(
+            const BYTE* pIn, const BYTE* pAnchor,
+            const BYTE* pMatch, const BYTE* pBase)
+{
+    size_t matchLength = 0;
+    while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
+        pIn--;
+        pMatch--;
+        matchLength++;
+    }
+    return matchLength;
+}
+
+/** ZSTD_ldm_fillFastTables() :
+ *
+ *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
+ *  This is similar to ZSTD_loadDictionaryContent.
+ *
+ *  The tables for the other strategies are filled within their
+ *  block compressors. */
+static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
+                                      ZSTD_compressionParameters const* cParams,
+                                      void const* end)
+{
+    const BYTE* const iend = (const BYTE*)end;
+
+    switch(cParams->strategy)
+    {
+    case ZSTD_fast:
+        ZSTD_fillHashTable(ms, cParams, iend);
+        ms->nextToUpdate = (U32)(iend - ms->window.base);
+        break;
+
+    case ZSTD_dfast:
+        ZSTD_fillDoubleHashTable(ms, cParams, iend);
+        ms->nextToUpdate = (U32)(iend - ms->window.base);
+        break;
+
+    case ZSTD_greedy:
+    case ZSTD_lazy:
+    case ZSTD_lazy2:
+    case ZSTD_btlazy2:
+    case ZSTD_btopt:
+    case ZSTD_btultra:
+        break;
+    default:
+        assert(0);  /* not possible : not a valid strategy id */
+    }
+
+    return 0;
+}
+
+/** ZSTD_ldm_fillLdmHashTable() :
+ *
+ *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
+ *  lastHash is the rolling hash that corresponds to lastHashed.
+ *
+ *  Returns the rolling hash corresponding to position iend-1. */
+static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
+                                     U64 lastHash, const BYTE* lastHashed,
+                                     const BYTE* iend, const BYTE* base,
+                                     U32 hBits, ldmParams_t const ldmParams)
+{
+    U64 rollingHash = lastHash;
+    const BYTE* cur = lastHashed + 1;
+
+    while (cur < iend) {
+        rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
+                                          cur[ldmParams.minMatchLength-1],
+                                          state->hashPower);
+        ZSTD_ldm_makeEntryAndInsertByTag(state,
+                                         rollingHash, hBits,
+                                         (U32)(cur - base), ldmParams);
+        ++cur;
+    }
+    return rollingHash;
+}
+
+
+/** ZSTD_ldm_limitTableUpdate() :
+ *
+ *  Sets ms->nextToUpdate to a position closer to anchor if it is far away
+ *  (after a long match, only update tables a limited amount). */
+static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
+{
+    U32 const current = (U32)(anchor - ms->window.base);
+    if (current > ms->nextToUpdate + 1024) {
+        ms->nextToUpdate =
+            current - MIN(512, current - ms->nextToUpdate - 1024);
+    }
+}
+
+static size_t ZSTD_ldm_generateSequences_internal(
+        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
+        ldmParams_t const* params, void const* src, size_t srcSize)
+{
+    /* LDM parameters */
+    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
+    U32 const minMatchLength = params->minMatchLength;
+    U64 const hashPower = ldmState->hashPower;
+    U32 const hBits = params->hashLog - params->bucketSizeLog;
+    U32 const ldmBucketSize = 1U << params->bucketSizeLog;
+    U32 const hashEveryLog = params->hashEveryLog;
+    U32 const ldmTagMask = (1U << params->hashEveryLog) - 1;
+    /* Prefix and extDict parameters */
+    U32 const dictLimit = ldmState->window.dictLimit;
+    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
+    BYTE const* const base = ldmState->window.base;
+    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
+    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
+    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
+    BYTE const* const lowPrefixPtr = base + dictLimit;
+    /* Input bounds */
+    BYTE const* const istart = (BYTE const*)src;
+    BYTE const* const iend = istart + srcSize;
+    BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE);
+    /* Input positions */
+    BYTE const* anchor = istart;
+    BYTE const* ip = istart;
+    /* Rolling hash */
+    BYTE const* lastHashed = NULL;
+    U64 rollingHash = 0;
+
+    while (ip <= ilimit) {
+        size_t mLength;
+        U32 const current = (U32)(ip - base);
+        size_t forwardMatchLength = 0, backwardMatchLength = 0;
+        ldmEntry_t* bestEntry = NULL;
+        if (ip != istart) {
+            rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
+                                              lastHashed[minMatchLength],
+                                              hashPower);
+        } else {
+            rollingHash = ZSTD_ldm_getRollingHash(ip, minMatchLength);
+        }
+        lastHashed = ip;
+
+        /* Do not insert and do not look for a match */
+        if (ZSTD_ldm_getTag(rollingHash, hBits, hashEveryLog) != ldmTagMask) {
+           ip++;
+           continue;
+        }
+
+        /* Get the best entry and compute the match lengths */
+        {
+            ldmEntry_t* const bucket =
+                ZSTD_ldm_getBucket(ldmState,
+                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
+                                   *params);
+            ldmEntry_t* cur;
+            size_t bestMatchLength = 0;
+            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
+
+            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
+                size_t curForwardMatchLength, curBackwardMatchLength,
+                       curTotalMatchLength;
+                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
+                    continue;
+                }
+                if (extDict) {
+                    BYTE const* const curMatchBase =
+                        cur->offset < dictLimit ? dictBase : base;
+                    BYTE const* const pMatch = curMatchBase + cur->offset;
+                    BYTE const* const matchEnd =
+                        cur->offset < dictLimit ? dictEnd : iend;
+                    BYTE const* const lowMatchPtr =
+                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
+
+                    curForwardMatchLength = ZSTD_count_2segments(
+                                                ip, pMatch, iend,
+                                                matchEnd, lowPrefixPtr);
+                    if (curForwardMatchLength < minMatchLength) {
+                        continue;
+                    }
+                    curBackwardMatchLength =
+                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
+                                                     lowMatchPtr);
+                    curTotalMatchLength = curForwardMatchLength +
+                                          curBackwardMatchLength;
+                } else { /* !extDict */
+                    BYTE const* const pMatch = base + cur->offset;
+                    curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
+                    if (curForwardMatchLength < minMatchLength) {
+                        continue;
+                    }
+                    curBackwardMatchLength =
+                        ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch,
+                                                     lowPrefixPtr);
+                    curTotalMatchLength = curForwardMatchLength +
+                                          curBackwardMatchLength;
+                }
+
+                if (curTotalMatchLength > bestMatchLength) {
+                    bestMatchLength = curTotalMatchLength;
+                    forwardMatchLength = curForwardMatchLength;
+                    backwardMatchLength = curBackwardMatchLength;
+                    bestEntry = cur;
+                }
+            }
+        }
+
+        /* No match found -- continue searching */
+        if (bestEntry == NULL) {
+            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
+                                             hBits, current,
+                                             *params);
+            ip++;
+            continue;
+        }
+
+        /* Match found */
+        mLength = forwardMatchLength + backwardMatchLength;
+        ip -= backwardMatchLength;
+
+        {
+            /* Store the sequence:
+             * ip = current - backwardMatchLength
+             * The match is at (bestEntry->offset - backwardMatchLength)
+             */
+            U32 const matchIndex = bestEntry->offset;
+            U32 const offset = current - matchIndex;
+            rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+            /* Out of sequence storage */
+            if (rawSeqStore->size == rawSeqStore->capacity)
+                return ERROR(dstSize_tooSmall);
+            seq->litLength = (U32)(ip - anchor);
+            seq->matchLength = (U32)mLength;
+            seq->offset = offset;
+            rawSeqStore->size++;
+        }
+
+        /* Insert the current entry into the hash table */
+        ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
+                                         (U32)(lastHashed - base),
+                                         *params);
+
+        assert(ip + backwardMatchLength == lastHashed);
+
+        /* Fill the hash table from lastHashed+1 to ip+mLength */
+        /* Heuristic: don't need to fill the entire table at end of block */
+        if (ip + mLength <= ilimit) {
+            rollingHash = ZSTD_ldm_fillLdmHashTable(
+                              ldmState, rollingHash, lastHashed,
+                              ip + mLength, base, hBits, *params);
+            lastHashed = ip + mLength - 1;
+        }
+        ip += mLength;
+        anchor = ip;
+    }
+    return iend - anchor;
+}
+
+/*! ZSTD_ldm_reduceTable() :
+ *  reduce table indexes by `reducerValue` */
+static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
+                                 U32 const reducerValue)
+{
+    U32 u;
+    for (u = 0; u < size; u++) {
+        if (table[u].offset < reducerValue) table[u].offset = 0;
+        else table[u].offset -= reducerValue;
+    }
+}
+
+size_t ZSTD_ldm_generateSequences(
+        ldmState_t* ldmState, rawSeqStore_t* sequences,
+        ldmParams_t const* params, void const* src, size_t srcSize)
+{
+    U32 const maxDist = 1U << params->windowLog;
+    BYTE const* const istart = (BYTE const*)src;
+    BYTE const* const iend = istart + srcSize;
+    size_t const kMaxChunkSize = 1 << 20;
+    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
+    size_t chunk;
+    size_t leftoverSize = 0;
+
+    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
+    /* Check that ZSTD_window_update() has been called for this chunk prior
+     * to passing it to this function.
+     */
+    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
+    /* The input could be very large (in zstdmt), so it must be broken up into
+     * chunks to enforce the maximum distance and handle overflow correction.
+     */
+    assert(sequences->pos <= sequences->size);
+    assert(sequences->size <= sequences->capacity);
+    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
+        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
+        size_t const remaining = (size_t)(iend - chunkStart);
+        BYTE const *const chunkEnd =
+            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
+        size_t const chunkSize = chunkEnd - chunkStart;
+        size_t newLeftoverSize;
+        size_t const prevSize = sequences->size;
+
+        assert(chunkStart < iend);
+        /* 1. Perform overflow correction if necessary. */
+        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
+            U32 const ldmHSize = 1U << params->hashLog;
+            U32 const correction = ZSTD_window_correctOverflow(
+                &ldmState->window, /* cycleLog */ 0, maxDist, src);
+            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
+        }
+        /* 2. We enforce the maximum offset allowed.
+         *
+         * kMaxChunkSize should be small enough that we don't lose too much of
+         * the window through early invalidation.
+         * TODO: * Test the chunk size.
+         *       * Try invalidation after the sequence generation and test
+         *         the offset against maxDist directly.
+         */
+        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, NULL);
+        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
+        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
+            ldmState, sequences, params, chunkStart, chunkSize);
+        if (ZSTD_isError(newLeftoverSize))
+            return newLeftoverSize;
+        /* 4. We add the leftover literals from previous iterations to the first
+         *    newly generated sequence, or add the `newLeftoverSize` if none are
+         *    generated.
+         */
+        /* Prepend the leftover literals from the last call */
+        if (prevSize < sequences->size) {
+            sequences->seq[prevSize].litLength += (U32)leftoverSize;
+            leftoverSize = newLeftoverSize;
+        } else {
+            assert(newLeftoverSize == chunkSize);
+            leftoverSize += chunkSize;
+        }
+    }
+    return 0;
+}
+
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) {
+    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
+        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
+        if (srcSize <= seq->litLength) {
+            /* Skip past srcSize literals */
+            seq->litLength -= (U32)srcSize;
+            return;
+        }
+        srcSize -= seq->litLength;
+        seq->litLength = 0;
+        if (srcSize < seq->matchLength) {
+            /* Skip past the first srcSize of the match */
+            seq->matchLength -= (U32)srcSize;
+            if (seq->matchLength < minMatch) {
+                /* The match is too short, omit it */
+                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
+                    seq[1].litLength += seq[0].matchLength;
+                }
+                rawSeqStore->pos++;
+            }
+            return;
+        }
+        srcSize -= seq->matchLength;
+        seq->matchLength = 0;
+        rawSeqStore->pos++;
+    }
+}
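+
+/* Illustrative trace (made-up numbers): skipping srcSize=8 when the first
+ * sequence is {litLength=5, matchLength=10} and minMatch=4:
+ *   8 > 5  -> consume the literals: srcSize=3, litLength=0
+ *   3 < 10 -> trim the match: matchLength=7, still >= minMatch, so keep it
+ * The sequence becomes {0, 7} and pos is unchanged. Had the trimmed match
+ * dropped below minMatch, its remainder would have been folded into the
+ * next sequence's literals and pos advanced past it. */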
+
+/**
+ * If the sequence length is longer than remaining then the sequence is split
+ * between this block and the next.
+ *
+ * Returns the current sequence to handle, or if the rest of the block should
+ * be literals, it returns a sequence with offset == 0.
+ */
+static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
+                                 U32 const remaining, U32 const minMatch)
+{
+    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
+    assert(sequence.offset > 0);
+    /* Likely: No partial sequence */
+    if (remaining >= sequence.litLength + sequence.matchLength) {
+        rawSeqStore->pos++;
+        return sequence;
+    }
+    /* Cut the sequence short (offset == 0 ==> rest is literals). */
+    if (remaining <= sequence.litLength) {
+        sequence.offset = 0;
+    } else if (remaining < sequence.litLength + sequence.matchLength) {
+        sequence.matchLength = remaining - sequence.litLength;
+        if (sequence.matchLength < minMatch) {
+            sequence.offset = 0;
+        }
+    }
+    /* Skip past `remaining` bytes for the future sequences. */
+    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
+    return sequence;
+}
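+
+/* Illustrative split (made-up numbers): remaining=20 against a sequence
+ * {litLength=8, matchLength=30} with minMatch=4. Since 8 < 20 < 8+30, the
+ * returned copy is trimmed to {8, 12}, and ZSTD_ldm_skipSequences() leaves
+ * {0, 18} in the store for the next block, so the match is emitted in two
+ * pieces at the same offset. */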
+
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+    ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize,
+    int const extDict)
+{
+    unsigned const minMatch = cParams->searchLength;
+    ZSTD_blockCompressor const blockCompressor =
+        ZSTD_selectBlockCompressor(cParams->strategy, extDict);
+    BYTE const* const base = ms->window.base;
+    /* Input bounds */
+    BYTE const* const istart = (BYTE const*)src;
+    BYTE const* const iend = istart + srcSize;
+    /* Input positions */
+    BYTE const* ip = istart;
+
+    assert(rawSeqStore->pos <= rawSeqStore->size);
+    assert(rawSeqStore->size <= rawSeqStore->capacity);
+    /* Loop through each sequence and apply the block compressor to the lits */
+    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
+        /* maybeSplitSequence updates rawSeqStore->pos */
+        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
+                                                   (U32)(iend - ip), minMatch);
+        int i;
+        /* End signal */
+        if (sequence.offset == 0)
+            break;
+
+        assert(sequence.offset <= (1U << cParams->windowLog));
+        assert(ip + sequence.litLength + sequence.matchLength <= iend);
+
+        /* Fill tables for block compressor */
+        ZSTD_ldm_limitTableUpdate(ms, ip);
+        ZSTD_ldm_fillFastTables(ms, cParams, ip);
+        /* Run the block compressor */
+        {
+            size_t const newLitLength =
+                blockCompressor(ms, seqStore, rep, cParams, ip,
+                                sequence.litLength);
+            ip += sequence.litLength;
+            ms->nextToUpdate = (U32)(ip - base);
+            /* Update the repcodes */
+            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
+                rep[i] = rep[i-1];
+            rep[0] = sequence.offset;
+            /* Store the sequence */
+            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength,
+                          sequence.offset + ZSTD_REP_MOVE,
+                          sequence.matchLength - MINMATCH);
+            ip += sequence.matchLength;
+        }
+    }
+    /* Fill the tables for the block compressor */
+    ZSTD_ldm_limitTableUpdate(ms, ip);
+    ZSTD_ldm_fillFastTables(ms, cParams, ip);
+    /* Compress the last literals */
+    {
+        size_t const lastLiterals = blockCompressor(ms, seqStore, rep, cParams,
+                                                    ip, iend - ip);
+        ms->nextToUpdate = (U32)(iend - base);
+        return lastLiterals;
+    }
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_ldm.h b/vendor/github.com/DataDog/zstd/zstd_ldm.h
new file mode 100644
index 0000000..0c3789f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_ldm.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ */
+
+#ifndef ZSTD_LDM_H
+#define ZSTD_LDM_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"   /* ldmParams_t, U32 */
+#include "zstd.h"   /* ZSTD_CCtx, size_t */
+
+/*-*************************************
+*  Long distance matching
+***************************************/
+
+#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_DEFAULTMAX
+
+/**
+ * ZSTD_ldm_generateSequences():
+ *
+ * Uses the long distance match finder to generate long range matching
+ * sequences in `sequences`; together these parse a prefix of the source.
+ * `sequences` must be large enough to store every sequence,
+ * which can be checked with `ZSTD_ldm_getMaxNbSeq()`.
+ * @returns 0 or an error code.
+ *
+ * NOTE: The user must have called ZSTD_window_update() for all of the input
+ * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks.
+ * NOTE: This function returns an error if it runs out of space to store
+ *       sequences.
+ */
+size_t ZSTD_ldm_generateSequences(
+            ldmState_t* ldms, rawSeqStore_t* sequences,
+            ldmParams_t const* params, void const* src, size_t srcSize);
+
+/**
+ * ZSTD_ldm_blockCompress():
+ *
+ * Compresses a block using the predefined sequences, along with a secondary
+ * block compressor. The literals section of every sequence is passed to the
+ * secondary block compressor, and those sequences are interspersed with the
+ * predefined sequences.
+ * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed.
+ * `rawSeqStore.seq` may also be updated to split the last sequence between two
+ * blocks.
+ * @return The length of the last literals.
+ *
+ * NOTE: The source must be at most the maximum block size, but the predefined
+ * sequences can be any size, and may be longer than the block. In the case that
+ * they are longer than the block, the last sequences may need to be split into
+ * two. We handle that case correctly, and update `rawSeqStore` appropriately.
+ * NOTE: This function does not return any errors.
+ */
+size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
+            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+            ZSTD_compressionParameters const* cParams,
+            void const* src, size_t srcSize,
+            int const extDict);
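+
+/* Simplified sketch of the intended call order (illustrative local names;
+ * the real driver in zstd_compress.c threads more state through):
+ *
+ *   ZSTD_ldm_adjustParameters(&params, &cParams);
+ *   ZSTD_window_update(&ldmState.window, src, srcSize);
+ *   {   size_t const err = ZSTD_ldm_generateSequences(&ldmState, &rawSeqStore,
+ *                                                     &params, src, srcSize);
+ *       if (ZSTD_isError(err)) return err;
+ *   }
+ *   lastLits = ZSTD_ldm_blockCompress(&rawSeqStore, &ms, &seqStore, rep,
+ *                                     &cParams, src, blockSize, extDict);
+ */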
+
+/**
+ * ZSTD_ldm_skipSequences():
+ *
+ * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`.
+ * Avoids emitting matches less than `minMatch` bytes.
+ * Must be called for data which is not passed to ZSTD_ldm_blockCompress().
+ */
+void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize,
+    U32 const minMatch);
+
+
+/** ZSTD_ldm_getTableSize() :
+ *  Estimate the space needed for long distance matching tables or 0 if LDM is
+ *  disabled.
+ */
+size_t ZSTD_ldm_getTableSize(ldmParams_t params);
+
+/** ZSTD_ldm_getMaxNbSeq() :
+ *  Return an upper bound on the number of sequences that can be produced by
+ *  the long distance matcher, or 0 if LDM is disabled.
+ */
+size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize);
+
+/** ZSTD_ldm_getHashPower() :
+ *  Return prime8bytes^(minMatchLength-1) */
+U64 ZSTD_ldm_getHashPower(U32 minMatchLength);
+
+/** ZSTD_ldm_adjustParameters() :
+ *  If the params->hashEveryLog is not set, set it to its default value based on
+ *  windowLog and params->hashLog.
+ *
+ *  Ensures that params->bucketSizeLog is <= params->hashLog (setting it to
+ *  params->hashLog if it is not).
+ *
+ *  Ensures that the minMatchLength >= targetLength during optimal parsing.
+ */
+void ZSTD_ldm_adjustParameters(ldmParams_t* params,
+                               ZSTD_compressionParameters const* cParams);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LDM_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_legacy.h b/vendor/github.com/DataDog/zstd/zstd_legacy.h
new file mode 100644
index 0000000..5893cb9
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_legacy.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LEGACY_H
+#define ZSTD_LEGACY_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include "mem.h"            /* MEM_STATIC */
+#include "error_private.h"  /* ERROR */
+#include "zstd.h"           /* ZSTD_inBuffer, ZSTD_outBuffer */
+
+#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
+#  undef ZSTD_LEGACY_SUPPORT
+#  define ZSTD_LEGACY_SUPPORT 8
+#endif
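+
+/* ZSTD_LEGACY_SUPPORT is the oldest v0.x format kept decodable: for example,
+ * building with ZSTD_LEGACY_SUPPORT=4 compiles in the v0.4-v0.7 decoders and
+ * drops v0.1-v0.3, while the fallback of 8 above compiles in none of them. */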
+
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+#  include "zstd_v01.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+#  include "zstd_v02.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+#  include "zstd_v03.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+#  include "zstd_v04.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+#  include "zstd_v05.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+#  include "zstd_v06.h"
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+#  include "zstd_v07.h"
+#endif
+
+/** ZSTD_isLegacy() :
+    @return : > 0 if supported by legacy decoder. 0 otherwise.
+              return value is the version.
+*/
+MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
+{
+    U32 magicNumberLE;
+    if (srcSize<4) return 0;
+    magicNumberLE = MEM_readLE32(src);
+    switch(magicNumberLE)
+    {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+        case ZSTDv01_magicNumberLE:return 1;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+        case ZSTDv02_magicNumber : return 2;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+        case ZSTDv03_magicNumber : return 3;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+        case ZSTDv04_magicNumber : return 4;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+        case ZSTDv05_MAGICNUMBER : return 5;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+        case ZSTDv06_MAGICNUMBER : return 6;
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+        case ZSTDv07_MAGICNUMBER : return 7;
+#endif
+        default : return 0;
+    }
+}
+
+
+MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)
+{
+    U32 const version = ZSTD_isLegacy(src, srcSize);
+    if (version < 5) return 0;  /* no decompressed size in frame header, or not a legacy format */
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+    if (version==5) {
+        ZSTDv05_parameters fParams;
+        size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
+        if (frResult != 0) return 0;
+        return fParams.srcSize;
+    }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+    if (version==6) {
+        ZSTDv06_frameParams fParams;
+        size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
+        if (frResult != 0) return 0;
+        return fParams.frameContentSize;
+    }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+    if (version==7) {
+        ZSTDv07_frameParams fParams;
+        size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
+        if (frResult != 0) return 0;
+        return fParams.frameContentSize;
+    }
+#endif
+    return 0;   /* should not be possible */
+}
+
+
+MEM_STATIC size_t ZSTD_decompressLegacy(
+                     void* dst, size_t dstCapacity,
+               const void* src, size_t compressedSize,
+               const void* dict,size_t dictSize)
+{
+    U32 const version = ZSTD_isLegacy(src, compressedSize);
+    (void)dst; (void)dstCapacity; (void)dict; (void)dictSize;  /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
+    switch(version)
+    {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+        case 1 :
+            return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+        case 2 :
+            return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+        case 3 :
+            return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+        case 4 :
+            return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+        case 5 :
+            {   size_t result;
+                ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
+                if (zd==NULL) return ERROR(memory_allocation);
+                result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
+                ZSTDv05_freeDCtx(zd);
+                return result;
+            }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+        case 6 :
+            {   size_t result;
+                ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
+                if (zd==NULL) return ERROR(memory_allocation);
+                result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
+                ZSTDv06_freeDCtx(zd);
+                return result;
+            }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+        case 7 :
+            {   size_t result;
+                ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
+                if (zd==NULL) return ERROR(memory_allocation);
+                result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
+                ZSTDv07_freeDCtx(zd);
+                return result;
+            }
+#endif
+        default :
+            return ERROR(prefix_unknown);
+    }
+}
+
+MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src,
+                                             size_t compressedSize)
+{
+    U32 const version = ZSTD_isLegacy(src, compressedSize);
+    switch(version)
+    {
+#if (ZSTD_LEGACY_SUPPORT <= 1)
+        case 1 :
+            return ZSTDv01_findFrameCompressedSize(src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 2)
+        case 2 :
+            return ZSTDv02_findFrameCompressedSize(src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 3)
+        case 3 :
+            return ZSTDv03_findFrameCompressedSize(src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+        case 4 :
+            return ZSTDv04_findFrameCompressedSize(src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+        case 5 :
+            return ZSTDv05_findFrameCompressedSize(src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+        case 6 :
+            return ZSTDv06_findFrameCompressedSize(src, compressedSize);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+        case 7 :
+            return ZSTDv07_findFrameCompressedSize(src, compressedSize);
+#endif
+        default :
+            return ERROR(prefix_unknown);
+    }
+}
+
+MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
+{
+    switch(version)
+    {
+        default :
+        case 1 :
+        case 2 :
+        case 3 :
+            (void)legacyContext;
+            return ERROR(version_unsupported);
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+        case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+        case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+        case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+        case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
+#endif
+    }
+}
+
+
+MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
+                                        const void* dict, size_t dictSize)
+{
+    DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
+    if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
+    switch(newVersion)
+    {
+        default :
+        case 1 :
+        case 2 :
+        case 3 :
+            (void)dict; (void)dictSize;
+            return 0;
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+        case 4 :
+        {
+            ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
+            if (dctx==NULL) return ERROR(memory_allocation);
+            ZBUFFv04_decompressInit(dctx);
+            ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);
+            *legacyContext = dctx;
+            return 0;
+        }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+        case 5 :
+        {
+            ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
+            if (dctx==NULL) return ERROR(memory_allocation);
+            ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);
+            *legacyContext = dctx;
+            return 0;
+        }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+        case 6 :
+        {
+            ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
+            if (dctx==NULL) return ERROR(memory_allocation);
+            ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);
+            *legacyContext = dctx;
+            return 0;
+        }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+        case 7 :
+        {
+            ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
+            if (dctx==NULL) return ERROR(memory_allocation);
+            ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);
+            *legacyContext = dctx;
+            return 0;
+        }
+#endif
+    }
+}
+
+
+
+MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
+                                              ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
+    switch(version)
+    {
+        default :
+        case 1 :
+        case 2 :
+        case 3 :
+            (void)legacyContext; (void)output; (void)input;
+            return ERROR(version_unsupported);
+#if (ZSTD_LEGACY_SUPPORT <= 4)
+        case 4 :
+            {
+                ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
+                const void* src = (const char*)input->src + input->pos;
+                size_t readSize = input->size - input->pos;
+                void* dst = (char*)output->dst + output->pos;
+                size_t decodedSize = output->size - output->pos;
+                size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+                output->pos += decodedSize;
+                input->pos += readSize;
+                return hintSize;
+            }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 5)
+        case 5 :
+            {
+                ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
+                const void* src = (const char*)input->src + input->pos;
+                size_t readSize = input->size - input->pos;
+                void* dst = (char*)output->dst + output->pos;
+                size_t decodedSize = output->size - output->pos;
+                size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+                output->pos += decodedSize;
+                input->pos += readSize;
+                return hintSize;
+            }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 6)
+        case 6 :
+            {
+                ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
+                const void* src = (const char*)input->src + input->pos;
+                size_t readSize = input->size - input->pos;
+                void* dst = (char*)output->dst + output->pos;
+                size_t decodedSize = output->size - output->pos;
+                size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+                output->pos += decodedSize;
+                input->pos += readSize;
+                return hintSize;
+            }
+#endif
+#if (ZSTD_LEGACY_SUPPORT <= 7)
+        case 7 :
+            {
+                ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
+                const void* src = (const char*)input->src + input->pos;
+                size_t readSize = input->size - input->pos;
+                void* dst = (char*)output->dst + output->pos;
+                size_t decodedSize = output->size - output->pos;
+                size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
+                output->pos += decodedSize;
+                input->pos += readSize;
+                return hintSize;
+            }
+#endif
+    }
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* ZSTD_LEGACY_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.c b/vendor/github.com/DataDog/zstd/zstd_opt.c
new file mode 100644
index 0000000..f63f0c5
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_opt.c
@@ -0,0 +1,923 @@
+/*
+ * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#include "zstd_compress_internal.h"
+#include "zstd_opt.h"
+
+
+#define ZSTD_LITFREQ_ADD    2   /* scaling factor for litFreq, so that frequencies adapt faster to new stats. Also used for matchSum (?) */
+#define ZSTD_FREQ_DIV       4   /* log factor when using previous stats to init next stats */
+#define ZSTD_MAX_PRICE     (1<<30)
+
+
+/*-*************************************
+*  Price functions for optimal parser
+***************************************/
+static void ZSTD_setLog2Prices(optState_t* optPtr)
+{
+    optPtr->log2litSum = ZSTD_highbit32(optPtr->litSum+1);
+    optPtr->log2litLengthSum = ZSTD_highbit32(optPtr->litLengthSum+1);
+    optPtr->log2matchLengthSum = ZSTD_highbit32(optPtr->matchLengthSum+1);
+    optPtr->log2offCodeSum = ZSTD_highbit32(optPtr->offCodeSum+1);
+}
+
+
+static void ZSTD_rescaleFreqs(optState_t* const optPtr,
+                              const BYTE* const src, size_t const srcSize)
+{
+    optPtr->staticPrices = 0;
+
+    if (optPtr->litLengthSum == 0) {  /* first init */
+        unsigned u;
+        if (srcSize <= 1024) optPtr->staticPrices = 1;
+
+        assert(optPtr->litFreq!=NULL);
+        for (u=0; u<=MaxLit; u++)
+            optPtr->litFreq[u] = 0;
+        for (u=0; u<srcSize; u++)
+            optPtr->litFreq[src[u]]++;
+        optPtr->litSum = 0;
+        for (u=0; u<=MaxLit; u++) {
+            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> ZSTD_FREQ_DIV);
+            optPtr->litSum += optPtr->litFreq[u];
+        }
+
+        for (u=0; u<=MaxLL; u++)
+            optPtr->litLengthFreq[u] = 1;
+        optPtr->litLengthSum = MaxLL+1;
+        for (u=0; u<=MaxML; u++)
+            optPtr->matchLengthFreq[u] = 1;
+        optPtr->matchLengthSum = MaxML+1;
+        for (u=0; u<=MaxOff; u++)
+            optPtr->offCodeFreq[u] = 1;
+        optPtr->offCodeSum = (MaxOff+1);
+
+    } else {
+        unsigned u;
+
+        optPtr->litSum = 0;
+        for (u=0; u<=MaxLit; u++) {
+            optPtr->litFreq[u] = 1 + (optPtr->litFreq[u] >> (ZSTD_FREQ_DIV+1));
+            optPtr->litSum += optPtr->litFreq[u];
+        }
+        optPtr->litLengthSum = 0;
+        for (u=0; u<=MaxLL; u++) {
+            optPtr->litLengthFreq[u] = 1 + (optPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
+            optPtr->litLengthSum += optPtr->litLengthFreq[u];
+        }
+        optPtr->matchLengthSum = 0;
+        for (u=0; u<=MaxML; u++) {
+            optPtr->matchLengthFreq[u] = 1 + (optPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
+            optPtr->matchLengthSum += optPtr->matchLengthFreq[u];
+        }
+        optPtr->offCodeSum = 0;
+        for (u=0; u<=MaxOff; u++) {
+            optPtr->offCodeFreq[u] = 1 + (optPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
+            optPtr->offCodeSum += optPtr->offCodeFreq[u];
+        }
+    }
+
+    ZSTD_setLog2Prices(optPtr);
+}
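+
+/* Worked example (illustrative) : with ZSTD_FREQ_DIV==4, a literal seen 40
+ * times in the first block is initialized to 1 + (40>>4) = 3 ; at the next
+ * rescale the shift grows to ZSTD_FREQ_DIV+1, so 40 would decay to
+ * 1 + (40>>5) = 2, letting fresh statistics outweigh stale ones. */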
+
+
+/* ZSTD_rawLiteralsCost() :
+ * cost of the literals (only) in a given segment (whose length can be zero);
+ * does not include the cost of the literalLength symbol */
+static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
+                                const optState_t* const optPtr)
+{
+    if (optPtr->staticPrices) return (litLength*6);  /* 6 bits per literal - no statistics used */
+    if (litLength == 0) return 0;
+
+    /* literals */
+    {   U32 u;
+        U32 cost = litLength * optPtr->log2litSum;
+        for (u=0; u < litLength; u++)
+            cost -= ZSTD_highbit32(optPtr->litFreq[literals[u]]+1);
+        return cost;
+    }
+}
+
+/* ZSTD_litLengthPrice() :
+ * cost of literalLength symbol */
+static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr)
+{
+    if (optPtr->staticPrices) return ZSTD_highbit32((U32)litLength+1);
+
+    /* literal Length */
+    {   U32 const llCode = ZSTD_LLcode(litLength);
+        U32 const price = LL_bits[llCode] + optPtr->log2litLengthSum - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+        return price;
+    }
+}
+
+/* ZSTD_fullLiteralsCost() :
+ * cost of the literal part of a sequence,
+ * including literals themselves, and literalLength symbol */
+static U32 ZSTD_fullLiteralsCost(const BYTE* const literals, U32 const litLength,
+                                 const optState_t* const optPtr)
+{
+    return ZSTD_rawLiteralsCost(literals, litLength, optPtr)
+         + ZSTD_litLengthPrice(litLength, optPtr);
+}
+
+/* ZSTD_litLengthContribution() :
+ * @return ( cost(litlength) - cost(0) )
+ * this value can then be added to ZSTD_rawLiteralsCost()
+ * to provide a cost directly comparable to that of a match ending at the same position */
+static int ZSTD_litLengthContribution(U32 const litLength, const optState_t* const optPtr)
+{
+    if (optPtr->staticPrices) return ZSTD_highbit32(litLength+1);
+
+    /* literal Length */
+    {   U32 const llCode = ZSTD_LLcode(litLength);
+        int const contribution = LL_bits[llCode]
+                        + ZSTD_highbit32(optPtr->litLengthFreq[0]+1)
+                        - ZSTD_highbit32(optPtr->litLengthFreq[llCode]+1);
+#if 1
+        return contribution;
+#else
+        return MAX(0, contribution); /* sometimes better, sometimes not ... */
+#endif
+    }
+}
+
+/* ZSTD_literalsContribution() :
+ * creates a fake cost for the literals part of a sequence
+ * which can be compared to the ending cost of a match
+ * should a new match start at this position */
+static int ZSTD_literalsContribution(const BYTE* const literals, U32 const litLength,
+                                     const optState_t* const optPtr)
+{
+    int const contribution = ZSTD_rawLiteralsCost(literals, litLength, optPtr)
+                           + ZSTD_litLengthContribution(litLength, optPtr);
+    return contribution;
+}
+
+/* ZSTD_getMatchPrice() :
+ * Provides the cost of the match part (offset + matchLength) of a sequence
+ * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence.
+ * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
+FORCE_INLINE_TEMPLATE U32 ZSTD_getMatchPrice(
+                                    U32 const offset, U32 const matchLength,
+                                    const optState_t* const optPtr,
+                                    int const optLevel)
+{
+    U32 price;
+    U32 const offCode = ZSTD_highbit32(offset+1);
+    U32 const mlBase = matchLength - MINMATCH;
+    assert(matchLength >= MINMATCH);
+
+    if (optPtr->staticPrices)  /* fixed scheme, do not use statistics */
+        return ZSTD_highbit32((U32)mlBase+1) + 16 + offCode;
+
+    price = offCode + optPtr->log2offCodeSum - ZSTD_highbit32(optPtr->offCodeFreq[offCode]+1);
+    if ((optLevel<2) /*static*/ && offCode >= 20) price += (offCode-19)*2; /* handicap for long distance offsets, favor decompression speed */
+
+    /* match Length */
+    {   U32 const mlCode = ZSTD_MLcode(mlBase);
+        price += ML_bits[mlCode] + optPtr->log2matchLengthSum - ZSTD_highbit32(optPtr->matchLengthFreq[mlCode]+1);
+    }
+
+    DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price);
+    return price;
+}
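+
+/* Worked example (illustrative) : an offset of (1<<20)-1 yields
+ * offCode = ZSTD_highbit32(1<<20) = 20, so at optLevel<2 the match pays the
+ * long-distance handicap (20-19)*2 = 2 extra bits, nudging the parser toward
+ * closer matches that are cheaper to decompress. */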
+
+static void ZSTD_updateStats(optState_t* const optPtr,
+                             U32 litLength, const BYTE* literals,
+                             U32 offsetCode, U32 matchLength)
+{
+    /* literals */
+    {   U32 u;
+        for (u=0; u < litLength; u++)
+            optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
+        optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
+    }
+
+    /* literal Length */
+    {   U32 const llCode = ZSTD_LLcode(litLength);
+        optPtr->litLengthFreq[llCode]++;
+        optPtr->litLengthSum++;
+    }
+
+    /* match offset code (0-2=>repCode; 3+=>offset+2) */
+    {   U32 const offCode = ZSTD_highbit32(offsetCode+1);
+        assert(offCode <= MaxOff);
+        optPtr->offCodeFreq[offCode]++;
+        optPtr->offCodeSum++;
+    }
+
+    /* match Length */
+    {   U32 const mlBase = matchLength - MINMATCH;
+        U32 const mlCode = ZSTD_MLcode(mlBase);
+        optPtr->matchLengthFreq[mlCode]++;
+        optPtr->matchLengthSum++;
+    }
+}
+
+
+/* ZSTD_readMINMATCH() :
+ * function safe only for comparisons
+ * assumption : memPtr must point at least 4 bytes before the end of the buffer */
+MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
+{
+    switch (length)
+    {
+    default :
+    case 4 : return MEM_read32(memPtr);
+    case 3 : if (MEM_isLittleEndian())
+                return MEM_read32(memPtr)<<8;
+             else
+                return MEM_read32(memPtr)>>8;
+    }
+}
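+
+/* Illustration : for length==3 on a little-endian target, the <<8 shift
+ * pushes the unwanted 4th byte out of the 32-bit word ; both sides of any
+ * comparison are shifted identically, so equality of the low 3 bytes is
+ * preserved, which is all this helper promises. */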
+
+
+/* Update hashTable3 up to ip (excluded)
+   Assumption : always within prefix (i.e. not within extDict) */
+static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, const BYTE* const ip)
+{
+    U32* const hashTable3 = ms->hashTable3;
+    U32 const hashLog3 = ms->hashLog3;
+    const BYTE* const base = ms->window.base;
+    U32 idx = ms->nextToUpdate3;
+    U32 const target = ms->nextToUpdate3 = (U32)(ip - base);
+    size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3);
+    assert(hashLog3 > 0);
+
+    while(idx < target) {
+        hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
+        idx++;
+    }
+
+    return hashTable3[hash3];
+}
+
+
+/*-*************************************
+*  Binary Tree search
+***************************************/
+/** ZSTD_insertBt1() : add one or more positions to the tree.
+ *  ip : assumed <= iend-8.
+ * @return : number of positions added */
+static U32 ZSTD_insertBt1(
+                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                const BYTE* const ip, const BYTE* const iend,
+                U32 const mls, U32 const extDict)
+{
+    U32*   const hashTable = ms->hashTable;
+    U32    const hashLog = cParams->hashLog;
+    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32*   const bt = ms->chainTable;
+    U32    const btLog  = cParams->chainLog - 1;
+    U32    const btMask = (1 << btLog) - 1;
+    U32 matchIndex = hashTable[h];
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const dictBase = ms->window.dictBase;
+    const U32 dictLimit = ms->window.dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    const BYTE* match;
+    const U32 current = (U32)(ip-base);
+    const U32 btLow = btMask >= current ? 0 : current - btMask;
+    U32* smallerPtr = bt + 2*(current&btMask);
+    U32* largerPtr  = smallerPtr + 1;
+    U32 dummy32;   /* to be nullified at the end */
+    U32 const windowLow = ms->window.lowLimit;
+    U32 matchEndIdx = current+8+1;
+    size_t bestLength = 8;
+    U32 nbCompares = 1U << cParams->searchLog;
+#ifdef ZSTD_C_PREDICT
+    U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
+    U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+    predictedSmall += (predictedSmall>0);
+    predictedLarge += (predictedLarge>0);
+#endif /* ZSTD_C_PREDICT */
+
+    DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
+
+    assert(ip <= iend-8);   /* required for h calculation */
+    hashTable[h] = current;   /* Update Hash Table */
+
+    while (nbCompares-- && (matchIndex > windowLow)) {
+        U32* const nextPtr = bt + 2*(matchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        assert(matchIndex < current);
+
+#ifdef ZSTD_C_PREDICT   /* note : can create issues when hashLog is small (<= 11) */
+        const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
+        if (matchIndex == predictedSmall) {
+            /* no need to check length, result known */
+            *smallerPtr = matchIndex;
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+            matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+            predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+            continue;
+        }
+        if (matchIndex == predictedLarge) {
+            *largerPtr = matchIndex;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+            predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+            continue;
+        }
+#endif
+
+        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if extDict is incorrectly set to 0 */
+            match = base + matchIndex;
+            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+        } else {
+            match = dictBase + matchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+            if (matchIndex+matchLength >= dictLimit)
+                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
+        }
+
+        if (matchLength > bestLength) {
+            bestLength = matchLength;
+            if (matchLength > matchEndIdx - matchIndex)
+                matchEndIdx = matchIndex + (U32)matchLength;
+        }
+
+        if (ip+matchLength == iend) {   /* equal : no way to know whether the match sorts smaller or larger */
+            break;   /* drop it, to guarantee consistency ; misses a bit of compression, but other solutions can corrupt the tree */
+        }
+
+        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
+            /* match is smaller than current */
+            *smallerPtr = matchIndex;             /* update smaller idx */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
+            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
+            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
+        } else {
+            /* match is larger than current */
+            *largerPtr = matchIndex;
+            commonLengthLarger = matchLength;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+    }   }
+
+    *smallerPtr = *largerPtr = 0;
+    if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
+    assert(matchEndIdx > current + 8);
+    return matchEndIdx - (current + 8);
+}
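+
+/* Illustration : the bestLength > 384 early return above is a throughput
+ * guard ; with bestLength == 500, the caller's update loop jumps
+ * MIN(192, 500-384) = 116 positions at once instead of inserting every
+ * position of a long repetitive run into the tree. */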
+
+FORCE_INLINE_TEMPLATE
+void ZSTD_updateTree_internal(
+                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                const BYTE* const ip, const BYTE* const iend,
+                const U32 mls, const U32 extDict)
+{
+    const BYTE* const base = ms->window.base;
+    U32 const target = (U32)(ip - base);
+    U32 idx = ms->nextToUpdate;
+    DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u  (extDict:%u)",
+                idx, target, extDict);
+
+    while(idx < target)
+        idx += ZSTD_insertBt1(ms, cParams, base+idx, iend, mls, extDict);
+    ms->nextToUpdate = target;
+}
+
+void ZSTD_updateTree(
+                ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                const BYTE* ip, const BYTE* iend)
+{
+    ZSTD_updateTree_internal(ms, cParams, ip, iend, cParams->searchLength, 0 /*extDict*/);
+}
+
+FORCE_INLINE_TEMPLATE
+U32 ZSTD_insertBtAndGetAllMatches (
+                    ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                    const BYTE* const ip, const BYTE* const iLimit, int const extDict,
+                    U32 rep[ZSTD_REP_NUM], U32 const ll0,
+                    ZSTD_match_t* matches, const U32 lengthToBeat, U32 const mls /* template */)
+{
+    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+    const BYTE* const base = ms->window.base;
+    U32 const current = (U32)(ip-base);
+    U32 const hashLog = cParams->hashLog;
+    U32 const minMatch = (mls==3) ? 3 : 4;
+    U32* const hashTable = ms->hashTable;
+    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
+    U32 matchIndex  = hashTable[h];
+    U32* const bt   = ms->chainTable;
+    U32 const btLog = cParams->chainLog - 1;
+    U32 const btMask= (1U << btLog) - 1;
+    size_t commonLengthSmaller=0, commonLengthLarger=0;
+    const BYTE* const dictBase = ms->window.dictBase;
+    U32 const dictLimit = ms->window.dictLimit;
+    const BYTE* const dictEnd = dictBase + dictLimit;
+    const BYTE* const prefixStart = base + dictLimit;
+    U32 const btLow = btMask >= current ? 0 : current - btMask;
+    U32 const windowLow = ms->window.lowLimit;
+    U32* smallerPtr = bt + 2*(current&btMask);
+    U32* largerPtr  = bt + 2*(current&btMask) + 1;
+    U32 matchEndIdx = current+8+1;   /* farthest referenced position of any match => detects repetitive patterns */
+    U32 dummy32;   /* to be nullified at the end */
+    U32 mnum = 0;
+    U32 nbCompares = 1U << cParams->searchLog;
+
+    size_t bestLength = lengthToBeat-1;
+    DEBUGLOG(7, "ZSTD_insertBtAndGetAllMatches");
+
+    /* check repCode */
+    {   U32 const lastR = ZSTD_REP_NUM + ll0;
+        U32 repCode;
+        for (repCode = ll0; repCode < lastR; repCode++) {
+            U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+            U32 const repIndex = current - repOffset;
+            U32 repLen = 0;
+            assert(current >= dictLimit);
+            if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) {  /* equivalent to `current > repIndex >= dictLimit` */
+                if (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch)) {
+                    repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch;
+                }
+            } else {  /* repIndex < dictLimit || repIndex >= current */
+                const BYTE* const repMatch = dictBase + repIndex;
+                assert(current >= windowLow);
+                if ( extDict /* this case only valid in extDict mode */
+                  && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow)  /* equivalent to `current > repIndex >= windowLow` */
+                     & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */)
+                  && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) {
+                    repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch;
+            }   }
+            /* save longer solution */
+            if (repLen > bestLength) {
+                DEBUGLOG(8, "found rep-match %u of length %u",
+                            repCode - ll0, (U32)repLen);
+                bestLength = repLen;
+                matches[mnum].off = repCode - ll0;
+                matches[mnum].len = (U32)repLen;
+                mnum++;
+                if ( (repLen > sufficient_len)
+                   | (ip+repLen == iLimit) ) {  /* best possible */
+                    return mnum;
+    }   }   }   }
+
+    /* HC3 match finder */
+    if ((mls == 3) /*static*/ && (bestLength < mls)) {
+        U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, ip);
+        if ((matchIndex3 > windowLow)
+          & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) {
+            size_t mlen;
+            if ((!extDict) /*static*/ || (matchIndex3 >= dictLimit)) {
+                const BYTE* const match = base + matchIndex3;
+                mlen = ZSTD_count(ip, match, iLimit);
+            } else {
+                const BYTE* const match = dictBase + matchIndex3;
+                mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart);
+            }
+
+            /* save best solution */
+            if (mlen >= mls /* == 3 > bestLength */) {
+                DEBUGLOG(8, "found small match with hlog3, of length %u",
+                            (U32)mlen);
+                bestLength = mlen;
+                assert(current > matchIndex3);
+                assert(mnum==0);  /* no prior solution */
+                matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE;
+                matches[0].len = (U32)mlen;
+                mnum = 1;
+                if ( (mlen > sufficient_len) |
+                     (ip+mlen == iLimit) ) {  /* best possible length */
+                    ms->nextToUpdate = current+1;  /* skip insertion */
+                    return 1;
+    }   }   }   }
+
+    hashTable[h] = current;   /* Update Hash Table */
+
+    while (nbCompares-- && (matchIndex > windowLow)) {
+        U32* const nextPtr = bt + 2*(matchIndex & btMask);
+        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+        const BYTE* match;
+        assert(current > matchIndex);
+
+        if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+            assert(matchIndex+matchLength >= dictLimit);  /* ensure the condition is correct when !extDict */
+            match = base + matchIndex;
+            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit);
+        } else {
+            match = dictBase + matchIndex;
+            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
+            if (matchIndex+matchLength >= dictLimit)
+                match = base + matchIndex;   /* prepare for match[matchLength] */
+        }
+
+        if (matchLength > bestLength) {
+            DEBUGLOG(8, "found match of length %u at distance %u",
+                    (U32)matchLength, current - matchIndex);
+            assert(matchEndIdx > matchIndex);
+            if (matchLength > matchEndIdx - matchIndex)
+                matchEndIdx = matchIndex + (U32)matchLength;
+            bestLength = matchLength;
+            matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE;
+            matches[mnum].len = (U32)matchLength;
+            mnum++;
+            if (matchLength > ZSTD_OPT_NUM) break;
+            if (ip+matchLength == iLimit) {  /* equal : no way to know whether the match sorts smaller or larger */
+                break;   /* drop, to preserve bt consistency (misses a little bit of compression) */
+            }
+        }
+
+        if (match[matchLength] < ip[matchLength]) {
+            /* match smaller than current */
+            *smallerPtr = matchIndex;             /* update smaller idx */
+            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            smallerPtr = nextPtr+1;               /* new candidate => larger than match, which was smaller than current */
+            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous, closer to current */
+        } else {
+            *largerPtr = matchIndex;
+            commonLengthLarger = matchLength;
+            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+            largerPtr = nextPtr;
+            matchIndex = nextPtr[0];
+    }   }
+
+    *smallerPtr = *largerPtr = 0;
+
+    assert(matchEndIdx > current+8);
+    ms->nextToUpdate = matchEndIdx - 8;  /* skip repetitive patterns */
+    return mnum;
+}
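+
+/* Illustration : the "intentional overflow" tests above lean on unsigned
+ * wraparound ; e.g. repOffset==0 makes repOffset-1 equal 0xFFFFFFFF, which
+ * can never be < current-dictLimit, so the invalid offsets 0 and -1 are
+ * rejected with a single unsigned comparison. */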
+
+
+FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
+                        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                        const BYTE* ip, const BYTE* const iHighLimit, int const extDict,
+                        U32 rep[ZSTD_REP_NUM], U32 const ll0,
+                        ZSTD_match_t* matches, U32 const lengthToBeat)
+{
+    U32 const matchLengthSearch = cParams->searchLength;
+    DEBUGLOG(7, "ZSTD_BtGetAllMatches");
+    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
+    ZSTD_updateTree_internal(ms, cParams, ip, iHighLimit, matchLengthSearch, extDict);
+    switch(matchLengthSearch)
+    {
+    case 3 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 3);
+    default :
+    case 4 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 4);
+    case 5 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 5);
+    case 7 :
+    case 6 : return ZSTD_insertBtAndGetAllMatches(ms, cParams, ip, iHighLimit, extDict, rep, ll0, matches, lengthToBeat, 6);
+    }
+}
+
+
+/*-*******************************
+*  Optimal parser
+*********************************/
+typedef struct repcodes_s {
+    U32 rep[3];
+} repcodes_t;
+
+repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
+{
+    repcodes_t newReps;
+    if (offset >= ZSTD_REP_NUM) {  /* full offset */
+        newReps.rep[2] = rep[1];
+        newReps.rep[1] = rep[0];
+        newReps.rep[0] = offset - ZSTD_REP_MOVE;
+    } else {   /* repcode */
+        U32 const repCode = offset + ll0;
+        if (repCode > 0) {  /* note : if repCode==0, no change */
+            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+            newReps.rep[1] = rep[0];
+            newReps.rep[0] = currentOffset;
+        } else {   /* repCode == 0 */
+            memcpy(&newReps, rep, sizeof(newReps));
+        }
+    }
+    return newReps;
+}
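+
+/* Worked example (illustrative) : with rep = {4,8,16} and ll0==0, offset==1
+ * selects repCode 1, so currentOffset == rep[1] == 8 and the history rotates
+ * to {8,4,16} ; any offset >= ZSTD_REP_NUM instead carries the real distance
+ * offset - ZSTD_REP_MOVE and shifts the whole history down. */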
+
+
+typedef struct {
+    const BYTE* anchor;
+    U32 litlen;
+    U32 rawLitCost;
+} cachedLiteralPrice_t;
+
+static U32 ZSTD_rawLiteralsCost_cached(
+                            cachedLiteralPrice_t* const cachedLitPrice,
+                            const BYTE* const anchor, U32 const litlen,
+                            const optState_t* const optStatePtr)
+{
+    U32 startCost;
+    U32 remainingLength;
+    const BYTE* startPosition;
+
+    if (anchor == cachedLitPrice->anchor) {
+        startCost = cachedLitPrice->rawLitCost;
+        startPosition = anchor + cachedLitPrice->litlen;
+        assert(litlen >= cachedLitPrice->litlen);
+        remainingLength = litlen - cachedLitPrice->litlen;
+    } else {
+        startCost = 0;
+        startPosition = anchor;
+        remainingLength = litlen;
+    }
+
+    {   U32 const rawLitCost = startCost + ZSTD_rawLiteralsCost(startPosition, remainingLength, optStatePtr);
+        cachedLitPrice->anchor = anchor;
+        cachedLitPrice->litlen = litlen;
+        cachedLitPrice->rawLitCost = rawLitCost;
+        return rawLitCost;
+    }
+}
+
+static U32 ZSTD_fullLiteralsCost_cached(
+                            cachedLiteralPrice_t* const cachedLitPrice,
+                            const BYTE* const anchor, U32 const litlen,
+                            const optState_t* const optStatePtr)
+{
+    return ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
+         + ZSTD_litLengthPrice(litlen, optStatePtr);
+}
+
+static int ZSTD_literalsContribution_cached(
+                            cachedLiteralPrice_t* const cachedLitPrice,
+                            const BYTE* const anchor, U32 const litlen,
+                            const optState_t* const optStatePtr)
+{
+    int const contribution = ZSTD_rawLiteralsCost_cached(cachedLitPrice, anchor, litlen, optStatePtr)
+                           + ZSTD_litLengthContribution(litlen, optStatePtr);
+    return contribution;
+}
+
+FORCE_INLINE_TEMPLATE
+size_t ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,seqStore_t* seqStore,
+                                      U32 rep[ZSTD_REP_NUM],
+                                      ZSTD_compressionParameters const* cParams,
+                                      const void* src, size_t srcSize,
+                                      const int optLevel, const int extDict)
+{
+    optState_t* const optStatePtr = &ms->opt;
+    const BYTE* const istart = (const BYTE*)src;
+    const BYTE* ip = istart;
+    const BYTE* anchor = istart;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* const ilimit = iend - 8;
+    const BYTE* const base = ms->window.base;
+    const BYTE* const prefixStart = base + ms->window.dictLimit;
+
+    U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1);
+    U32 const minMatch = (cParams->searchLength == 3) ? 3 : 4;
+
+    ZSTD_optimal_t* const opt = optStatePtr->priceTable;
+    ZSTD_match_t* const matches = optStatePtr->matchTable;
+    cachedLiteralPrice_t cachedLitPrice;
+
+    /* init */
+    DEBUGLOG(5, "ZSTD_compressBlock_opt_generic");
+    ms->nextToUpdate3 = ms->nextToUpdate;
+    ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize);
+    ip += (ip==prefixStart);
+    memset(&cachedLitPrice, 0, sizeof(cachedLitPrice));
+
+    /* Match Loop */
+    while (ip < ilimit) {
+        U32 cur, last_pos = 0;
+        U32 best_mlen, best_off;
+
+        /* find first match */
+        {   U32 const litlen = (U32)(ip - anchor);
+            U32 const ll0 = !litlen;
+            U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, ip, iend, extDict, rep, ll0, matches, minMatch);
+            if (!nbMatches) { ip++; continue; }
+
+            /* initialize opt[0] */
+            { U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
+            opt[0].mlen = 1;
+            opt[0].litlen = litlen;
+
+            /* large match -> immediate encoding */
+            {   U32 const maxML = matches[nbMatches-1].len;
+                DEBUGLOG(7, "found %u matches of maxLength=%u and offset=%u at cPos=%u => start new series",
+                            nbMatches, maxML, matches[nbMatches-1].off, (U32)(ip-prefixStart));
+
+                if (maxML > sufficient_len) {
+                    best_mlen = maxML;
+                    best_off = matches[nbMatches-1].off;
+                    DEBUGLOG(7, "large match (%u>%u), immediate encoding",
+                                best_mlen, sufficient_len);
+                    cur = 0;
+                    last_pos = 1;
+                    goto _shortestPath;
+            }   }
+
+            /* set prices for first matches starting position == 0 */
+            {   U32 const literalsPrice = ZSTD_fullLiteralsCost_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
+                U32 pos;
+                U32 matchNb;
+                for (pos = 0; pos < minMatch; pos++) {
+                    opt[pos].mlen = 1;
+                    opt[pos].price = ZSTD_MAX_PRICE;
+                }
+                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+                    U32 const offset = matches[matchNb].off;
+                    U32 const end = matches[matchNb].len;
+                    repcodes_t const repHistory = ZSTD_updateRep(rep, offset, ll0);
+                    for ( ; pos <= end ; pos++ ) {
+                        U32 const matchPrice = literalsPrice + ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel);
+                        DEBUGLOG(7, "rPos:%u => set initial price : %u",
+                                    pos, matchPrice);
+                        opt[pos].mlen = pos;
+                        opt[pos].off = offset;
+                        opt[pos].litlen = litlen;
+                        opt[pos].price = matchPrice;
+                        memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
+                }   }
+                last_pos = pos-1;
+            }
+        }
+
+        /* check further positions */
+        for (cur = 1; cur <= last_pos; cur++) {
+            const BYTE* const inr = ip + cur;
+            assert(cur < ZSTD_OPT_NUM);
+
+            /* Fix current position with one literal if cheaper */
+            {   U32 const litlen = (opt[cur-1].mlen == 1) ? opt[cur-1].litlen + 1 : 1;
+                int price;  /* note : contribution can be negative */
+                if (cur > litlen) {
+                    price = opt[cur - litlen].price + ZSTD_literalsContribution(inr-litlen, litlen, optStatePtr);
+                } else {
+                    price = ZSTD_literalsContribution_cached(&cachedLitPrice, anchor, litlen, optStatePtr);
+                }
+                assert(price < 1000000000); /* overflow check */
+                if (price <= opt[cur].price) {
+                    DEBUGLOG(7, "rPos:%u : better price (%u<%u) using literal",
+                                cur, price, opt[cur].price);
+                    opt[cur].mlen = 1;
+                    opt[cur].off = 0;
+                    opt[cur].litlen = litlen;
+                    opt[cur].price = price;
+                    memcpy(opt[cur].rep, opt[cur-1].rep, sizeof(opt[cur].rep));
+            }   }
+
+            /* last match must start at a minimum distance of 8 from iend */
+            if (inr > ilimit) continue;
+
+            if (cur == last_pos) break;
+
+            if ( (optLevel==0) /*static*/
+              && (opt[cur+1].price <= opt[cur].price) )
+                continue;  /* skip unpromising positions; ~+6% speed, -0.01 ratio */
+
+            {   U32 const ll0 = (opt[cur].mlen != 1);
+                U32 const litlen = (opt[cur].mlen == 1) ? opt[cur].litlen : 0;
+                U32 const previousPrice = (cur > litlen) ? opt[cur-litlen].price : 0;
+                U32 const basePrice = previousPrice + ZSTD_fullLiteralsCost(inr-litlen, litlen, optStatePtr);
+                U32 const nbMatches = ZSTD_BtGetAllMatches(ms, cParams, inr, iend, extDict, opt[cur].rep, ll0, matches, minMatch);
+                U32 matchNb;
+                if (!nbMatches) continue;
+
+                {   U32 const maxML = matches[nbMatches-1].len;
+                    DEBUGLOG(7, "rPos:%u, found %u matches, of maxLength=%u",
+                                cur, nbMatches, maxML);
+
+                    if ( (maxML > sufficient_len)
+                       | (cur + maxML >= ZSTD_OPT_NUM) ) {
+                        best_mlen = maxML;
+                        best_off = matches[nbMatches-1].off;
+                        last_pos = cur + 1;
+                        goto _shortestPath;
+                    }
+                }
+
+                /* set prices using matches found at position == cur */
+                for (matchNb = 0; matchNb < nbMatches; matchNb++) {
+                    U32 const offset = matches[matchNb].off;
+                    repcodes_t const repHistory = ZSTD_updateRep(opt[cur].rep, offset, ll0);
+                    U32 const lastML = matches[matchNb].len;
+                    U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch;
+                    U32 mlen;
+
+                    DEBUGLOG(7, "testing match %u => offCode=%u, mlen=%u, llen=%u",
+                                matchNb, matches[matchNb].off, lastML, litlen);
+
+                    for (mlen = lastML; mlen >= startML; mlen--) {
+                        U32 const pos = cur + mlen;
+                        int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel);
+
+                        if ((pos > last_pos) || (price < opt[pos].price)) {
+                            DEBUGLOG(7, "rPos:%u => new better price (%u<%u)",
+                                        pos, price, opt[pos].price);
+                            while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; }
+                            opt[pos].mlen = mlen;
+                            opt[pos].off = offset;
+                            opt[pos].litlen = litlen;
+                            opt[pos].price = price;
+                            memcpy(opt[pos].rep, &repHistory, sizeof(repHistory));
+                        } else {
+                            if (optLevel==0) break;  /* gets ~+10% speed for about -0.01 ratio loss */
+                        }
+            }   }   }
+        }  /* for (cur = 1; cur <= last_pos; cur++) */
+
+        best_mlen = opt[last_pos].mlen;
+        best_off = opt[last_pos].off;
+        cur = last_pos - best_mlen;
+
+_shortestPath:   /* cur, last_pos, best_mlen, best_off have to be set */
+        assert(opt[0].mlen == 1);
+
+        /* reverse traversal */
+        DEBUGLOG(7, "start reverse traversal (last_pos:%u, cur:%u)",
+                    last_pos, cur);
+        {   U32 selectedMatchLength = best_mlen;
+            U32 selectedOffset = best_off;
+            U32 pos = cur;
+            while (1) {
+                U32 const mlen = opt[pos].mlen;
+                U32 const off = opt[pos].off;
+                opt[pos].mlen = selectedMatchLength;
+                opt[pos].off = selectedOffset;
+                selectedMatchLength = mlen;
+                selectedOffset = off;
+                if (mlen > pos) break;
+                pos -= mlen;
+        }   }
+
+        /* save sequences */
+        {   U32 pos;
+            for (pos=0; pos < last_pos; ) {
+                U32 const llen = (U32)(ip - anchor);
+                U32 const mlen = opt[pos].mlen;
+                U32 const offset = opt[pos].off;
+                if (mlen == 1) { ip++; pos++; continue; }  /* literal position => move on */
+                pos += mlen; ip += mlen;
+
+                /* repcodes update : like ZSTD_updateRep(), but update in place */
+                if (offset >= ZSTD_REP_NUM) {  /* full offset */
+                    rep[2] = rep[1];
+                    rep[1] = rep[0];
+                    rep[0] = offset - ZSTD_REP_MOVE;
+                } else {   /* repcode */
+                    U32 const repCode = offset + (llen==0);
+                    if (repCode) {  /* note : if repCode==0, no change */
+                        U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+                        if (repCode >= 2) rep[2] = rep[1];
+                        rep[1] = rep[0];
+                        rep[0] = currentOffset;
+                    }
+                }
+
+                ZSTD_updateStats(optStatePtr, llen, anchor, offset, mlen);
+                ZSTD_storeSeq(seqStore, llen, anchor, offset, mlen-MINMATCH);
+                anchor = ip;
+        }   }
+        ZSTD_setLog2Prices(optStatePtr);
+    }   /* while (ip < ilimit) */
+
+    /* Return the last literals size */
+    return iend - anchor;
+}
+
+
+size_t ZSTD_compressBlock_btopt(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    DEBUGLOG(5, "ZSTD_compressBlock_btopt");
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 0 /*extDict*/);
+}
+
+size_t ZSTD_compressBlock_btultra(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 0 /*extDict*/);
+}
+
+size_t ZSTD_compressBlock_btopt_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 0 /*optLevel*/, 1 /*extDict*/);
+}
+
+size_t ZSTD_compressBlock_btultra_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, cParams, src, srcSize, 2 /*optLevel*/, 1 /*extDict*/);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_opt.h b/vendor/github.com/DataDog/zstd/zstd_opt.h
new file mode 100644
index 0000000..b8dc389
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_opt.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_OPT_H
+#define ZSTD_OPT_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"
+
+void ZSTD_updateTree(
+        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+        const BYTE* ip, const BYTE* iend);  /* used in ZSTD_loadDictionaryContent() */
+
+size_t ZSTD_compressBlock_btopt(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_OPT_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_stream.go b/vendor/github.com/DataDog/zstd/zstd_stream.go
new file mode 100644
index 0000000..d5d1336
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_stream.go
@@ -0,0 +1,291 @@
+package zstd
+
+/*
+#define ZSTD_STATIC_LINKING_ONLY
+#define ZBUFF_DISABLE_DEPRECATE_WARNINGS
+#include "zstd.h"
+#include "zbuff.h"
+*/
+import "C"
+import (
+	"errors"
+	"fmt"
+	"io"
+	"unsafe"
+)
+
+var errShortRead = errors.New("short read")
+
+// Writer is an io.WriteCloser that zstd-compresses its input.
+type Writer struct {
+	CompressionLevel int
+
+	ctx              *C.ZSTD_CCtx
+	dict             []byte
+	dstBuffer        []byte
+	firstError       error
+	underlyingWriter io.Writer
+}
+
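+// resize returns a slice of length newSize, backed when possible by the same
+// array as in; existing bytes are preserved, which Read relies on to keep
+// leftover compressed input (compressionLeft bytes) at the front of the buffer.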
+func resize(in []byte, newSize int) []byte {
+	if in == nil {
+		return make([]byte, newSize)
+	}
+	if newSize <= cap(in) {
+		return in[:newSize]
+	}
+	toAdd := newSize - len(in)
+	return append(in, make([]byte, toAdd)...)
+}
+
+// NewWriter creates a new Writer with default compression options.  Writes to
+// the writer will be written in compressed form to w.
+func NewWriter(w io.Writer) *Writer {
+	return NewWriterLevelDict(w, DefaultCompression, nil)
+}
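+
+// A minimal usage sketch (illustrative; error handling elided):
+//
+//	var buf bytes.Buffer
+//	w := NewWriterLevel(&buf, BestSpeed)
+//	w.Write([]byte("hello"))
+//	w.Close() // flushes the frame epilogue into buf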
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming default compression.
+//
+// The level can be DefaultCompression or any integer value between BestSpeed
+// and BestCompression inclusive.
+func NewWriterLevel(w io.Writer, level int) *Writer {
+	return NewWriterLevelDict(w, level, nil)
+}
+
+// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to
+// compress with.  If the dictionary is empty or nil it is ignored. The dictionary
+// should not be modified until the writer is closed.
+func NewWriterLevelDict(w io.Writer, level int, dict []byte) *Writer {
+	var err error
+	ctx := C.ZSTD_createCCtx()
+
+	if len(dict) == 0 { // empty or nil dict is ignored, as documented; avoids a panic on &dict[0]
+		err = getError(int(C.ZSTD_compressBegin(ctx,
+			C.int(level))))
+	} else {
+		err = getError(int(C.ZSTD_compressBegin_usingDict(
+			ctx,
+			unsafe.Pointer(&dict[0]),
+			C.size_t(len(dict)),
+			C.int(level))))
+	}
+
+	return &Writer{
+		CompressionLevel: level,
+		ctx:              ctx,
+		dict:             dict,
+		dstBuffer:        make([]byte, CompressBound(1024)),
+		firstError:       err,
+		underlyingWriter: w,
+	}
+}
+
+// Write writes a compressed form of p to the underlying io.Writer.
+func (w *Writer) Write(p []byte) (int, error) {
+	if w.firstError != nil {
+		return 0, w.firstError
+	}
+	if len(p) == 0 {
+		return 0, nil
+	}
+	// Grow dstBuffer if it cannot hold the worst-case compressed size of p
+	if len(w.dstBuffer) < CompressBound(len(p)) {
+		w.dstBuffer = make([]byte, CompressBound(len(p)))
+	}
+
+	retCode := C.ZSTD_compressContinue(
+		w.ctx,
+		unsafe.Pointer(&w.dstBuffer[0]),
+		C.size_t(len(w.dstBuffer)),
+		unsafe.Pointer(&p[0]),
+		C.size_t(len(p)))
+
+	if err := getError(int(retCode)); err != nil {
+		return 0, err
+	}
+	written := int(retCode)
+
+	// Write the compressed block to the underlying writer
+	_, err := w.underlyingWriter.Write(w.dstBuffer[:written])
+
+	// Same behaviour as zlib: we can't know how much data was actually
+	// written, only whether an error occurred
+	if err != nil {
+		return 0, err
+	}
+	return len(p), err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer and freeing objects, but does not close the underlying io.Writer.
+func (w *Writer) Close() error {
+	retCode := C.ZSTD_compressEnd(
+		w.ctx,
+		unsafe.Pointer(&w.dstBuffer[0]),
+		C.size_t(len(w.dstBuffer)),
+		unsafe.Pointer(nil),
+		C.size_t(0))
+
+	if err := getError(int(retCode)); err != nil {
+		return err
+	}
+	written := int(retCode)
+	retCode = C.ZSTD_freeCCtx(w.ctx) // Free the context now that the final block has been produced
+
+	if err := getError(int(retCode)); err != nil {
+		return err
+	}
+
+	_, err := w.underlyingWriter.Write(w.dstBuffer[:written])
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// reader is an io.ReadCloser that decompresses when read from.
+type reader struct {
+	ctx                 *C.ZBUFF_DCtx
+	compressionBuffer   []byte
+	compressionLeft     int
+	decompressionBuffer []byte
+	decompOff           int
+	decompSize          int
+	dict                []byte
+	firstError          error
+	recommendedSrcSize  int
+	underlyingReader    io.Reader
+}
+
+// NewReader creates a new io.ReadCloser.  Reads from the returned ReadCloser
+// read and decompress data from r.  It is the caller's responsibility to call
+// Close on the ReadCloser when done.  If this is not done, underlying objects
+// in the zstd library will not be freed.
+func NewReader(r io.Reader) io.ReadCloser {
+	return NewReaderDict(r, nil)
+}
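+
+// A minimal usage sketch (illustrative):
+//
+//	r := NewReader(compressedStream) // compressedStream is any io.Reader
+//	defer r.Close()                  // frees the underlying C context
+//	plain, err := ioutil.ReadAll(r)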
+
+// NewReaderDict is like NewReader but uses a preset dictionary.  NewReaderDict
+// ignores the dictionary if it is nil.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+	var err error
+	ctx := C.ZBUFF_createDCtx()
+	if len(dict) == 0 {
+		err = getError(int(C.ZBUFF_decompressInit(ctx)))
+	} else {
+		err = getError(int(C.ZBUFF_decompressInitDictionary(
+			ctx,
+			unsafe.Pointer(&dict[0]),
+			C.size_t(len(dict)))))
+	}
+	cSize := int(C.ZBUFF_recommendedDInSize())
+	dSize := int(C.ZBUFF_recommendedDOutSize())
+	if cSize <= 0 {
+		panic(fmt.Errorf("ZBUFF_recommendedDInSize() returned invalid size: %v", cSize))
+	}
+	if dSize <= 0 {
+		panic(fmt.Errorf("ZBUFF_recommendedDOutSize() returned invalid size: %v", dSize))
+	}
+
+	compressionBuffer := make([]byte, cSize)
+	decompressionBuffer := make([]byte, dSize)
+	return &reader{
+		ctx:                 ctx,
+		dict:                dict,
+		compressionBuffer:   compressionBuffer,
+		decompressionBuffer: decompressionBuffer,
+		firstError:          err,
+		recommendedSrcSize:  cSize,
+		underlyingReader:    r,
+	}
+}
+
+// Close frees the allocated C objects
+func (r *reader) Close() error {
+	return getError(int(C.ZBUFF_freeDCtx(r.ctx)))
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+
+	// If we already have enough bytes, return
+	if r.decompSize-r.decompOff >= len(p) {
+		copy(p, r.decompressionBuffer[r.decompOff:])
+		r.decompOff += len(p)
+		return len(p), nil
+	}
+
+	copy(p, r.decompressionBuffer[r.decompOff:r.decompSize])
+	got := r.decompSize - r.decompOff
+	r.decompSize = 0
+	r.decompOff = 0
+
+	for got < len(p) {
+		// Populate src
+		src := r.compressionBuffer
+		reader := r.underlyingReader
+		n, err := TryReadFull(reader, src[r.compressionLeft:])
+		if err != nil && err != errShortRead { // Handle underlying reader errors first
+			return 0, fmt.Errorf("failed to read from underlying reader: %s", err)
+		} else if n == 0 && r.compressionLeft == 0 {
+			return got, io.EOF
+		}
+		src = src[:r.compressionLeft+n]
+
+		// C code
+		cSrcSize := C.size_t(len(src))
+		cDstSize := C.size_t(len(r.decompressionBuffer))
+		retCode := int(C.ZBUFF_decompressContinue(
+			r.ctx,
+			unsafe.Pointer(&r.decompressionBuffer[0]),
+			&cDstSize,
+			unsafe.Pointer(&src[0]),
+			&cSrcSize))
+
+		if err = getError(retCode); err != nil {
+			return 0, fmt.Errorf("failed to decompress: %s", err)
+		}
+
+		// Keep any unconsumed compressed bytes for the next iteration
+		if int(cSrcSize) < len(src) {
+			left := src[int(cSrcSize):]
+			copy(r.compressionBuffer, left)
+		}
+		r.compressionLeft = len(src) - int(cSrcSize)
+		r.decompSize = int(cDstSize)
+		r.decompOff = copy(p[got:], r.decompressionBuffer[:r.decompSize])
+		got += r.decompOff
+
+		// Resize buffers
+		nsize := retCode // Hint for next src buffer size
+		if nsize <= 0 {
+			// Reset to recommended size
+			nsize = r.recommendedSrcSize
+		}
+		if nsize < r.compressionLeft {
+			nsize = r.compressionLeft
+		}
+		r.compressionBuffer = resize(r.compressionBuffer, nsize)
+	}
+	return got, nil
+}
+
+// TryReadFull reads into buf just as io.ReadFull does, except that a short
+// read is expected: instead of returning ErrUnexpectedEOF as ReadAtLeast does,
+// it returns errShortRead, so short reads can be distinguished from failures.
+// ReadFull/ReadAtLeast are not used because they mask Reader errors, such as
+// network failures, and cause a panic instead of an error.
+func TryReadFull(r io.Reader, buf []byte) (n int, err error) {
+	for n < len(buf) && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		n += nn
+	}
+	if n == len(buf) && err == io.EOF {
+		err = nil // EOF at the end is somewhat expected
+	} else if err == io.EOF {
+		err = errShortRead
+	}
+	return
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.c b/vendor/github.com/DataDog/zstd/zstd_v01.c
new file mode 100644
index 0000000..ae1cb2c
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v01.c
@@ -0,0 +1,2127 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/******************************************
+*  Includes
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include "zstd_v01.h"
+#include "error_private.h"
+
+
+/******************************************
+*  Static allocation
+******************************************/
+/* You can statically allocate an FSE CTable/DTable as a table of unsigned using the macro below */
+#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
+
+/* You can statically allocate a Huff0 DTable as a table of unsigned short using the macro below */
+#define HUF_DTABLE_SIZE_U16(maxTableLog)   (1 + (1<<maxTableLog))
+#define HUF_CREATE_STATIC_DTABLE(DTable, maxTableLog) \
+        unsigned short DTable[HUF_DTABLE_SIZE_U16(maxTableLog)] = { maxTableLog }
+
+
+/******************************************
+*  Error Management
+******************************************/
+#define FSE_LIST_ERRORS(ITEM) \
+        ITEM(FSE_OK_NoError) ITEM(FSE_ERROR_GENERIC) \
+        ITEM(FSE_ERROR_tableLog_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooSmall) \
+        ITEM(FSE_ERROR_dstSize_tooSmall) ITEM(FSE_ERROR_srcSize_wrong)\
+        ITEM(FSE_ERROR_corruptionDetected) \
+        ITEM(FSE_ERROR_maxCode)
+
+#define FSE_GENERATE_ENUM(ENUM) ENUM,
+typedef enum { FSE_LIST_ERRORS(FSE_GENERATE_ENUM) } FSE_errorCodes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
+
+
+/******************************************
+*  FSE symbol compression API
+******************************************/
+/*
+   This API consists of small unitary functions, which benefit greatly from being inlined.
+   You will want to enable link-time optimization to ensure these functions are properly inlined in your binary.
+   Visual Studio seems to do it automatically.
+   For gcc or clang, you'll need to add the -flto flag at both the compilation and linking stages.
+   If none of these solutions is applicable, include "fse.c" directly.
+*/
+
+typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+
+typedef struct
+{
+    size_t bitContainer;
+    int    bitPos;
+    char*  startPtr;
+    char*  ptr;
+    char*  endPtr;
+} FSE_CStream_t;
+
+typedef struct
+{
+    ptrdiff_t   value;
+    const void* stateTable;
+    const void* symbolTT;
+    unsigned    stateLog;
+} FSE_CState_t;
+
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} FSE_DStream_t;
+
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+typedef enum { FSE_DStream_unfinished = 0,
+               FSE_DStream_endOfBuffer = 1,
+               FSE_DStream_completed = 2,
+               FSE_DStream_tooFar = 3 } FSE_DStream_status;  /* result of FSE_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... ?! */
+
+
+/****************************************************************
+*  Tuning parameters
+****************************************************************/
+/* MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/* FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/****************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+
+
+/****************************************************************
+*  Byte symbol type
+****************************************************************/
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSE_decode_t;   /* size == U32 */
+
+
+
+/****************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/****************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+
+#ifndef MEM_ACCESS_MODULE
+#define MEM_ACCESS_MODULE
+/****************************************************************
+*  Basic Types
+*****************************************************************/
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+# include <stdint.h>
+typedef  uint8_t BYTE;
+typedef uint16_t U16;
+typedef  int16_t S16;
+typedef uint32_t U32;
+typedef  int32_t S32;
+typedef uint64_t U64;
+typedef  int64_t S64;
+#else
+typedef unsigned char       BYTE;
+typedef unsigned short      U16;
+typedef   signed short      S16;
+typedef unsigned int        U32;
+typedef   signed int        S32;
+typedef unsigned long long  U64;
+typedef   signed long long  S64;
+#endif
+
+#endif   /* MEM_ACCESS_MODULE */
+
+/****************************************************************
+*  Memory I/O
+*****************************************************************/
+/* FSE_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : use the `__packed` attribute. This depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets whose generated assembly depends on alignment.
+ *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2).
+ */
+#ifndef FSE_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define FSE_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define FSE_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
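+
+/* Example (editor's note) : as the guard above states, FSE_FORCE_MEMORY_ACCESS
+ * may be defined externally to override the auto-detection, e.g. on the
+ * compiler command line :
+ *
+ *     cc -DFSE_FORCE_MEMORY_ACCESS=0 ...   (always use the safe memcpy() path)
+ */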
+
+
+static unsigned FSE_32bits(void)
+{
+    return sizeof(void*)==4;
+}
+
+static unsigned FSE_isLittleEndian(void)
+{
+    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)
+
+static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)
+
+/* the __packed attribute is safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+#else
+
+static U16 FSE_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 FSE_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U64 FSE_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+#endif /* FSE_FORCE_MEMORY_ACCESS */
+
+static U16 FSE_readLE16(const void* memPtr)
+{
+    if (FSE_isLittleEndian())
+        return FSE_read16(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+static U32 FSE_readLE32(const void* memPtr)
+{
+    if (FSE_isLittleEndian())
+        return FSE_read32(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+    }
+}
+
+
+static U64 FSE_readLE64(const void* memPtr)
+{
+    if (FSE_isLittleEndian())
+        return FSE_read64(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+    }
+}
+
+static size_t FSE_readLEST(const void* memPtr)
+{
+    if (FSE_32bits())
+        return (size_t)FSE_readLE32(memPtr);
+    else
+        return (size_t)FSE_readLE64(memPtr);
+}
+
+
+
+/****************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
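+
+/* Worked example (editor's note) : with FSE_MAX_MEMORY_USAGE == 14,
+*  FSE_MAX_TABLELOG == 12, so FSE_MAX_TABLESIZE == 1<<12 == 4096 cells and
+*  FSE_MAXTABLESIZE_MASK == 4095, safely below FSE_TABLELOG_ABSOLUTE_MAX (15). */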
+
+
+/****************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/****************************************************************
+*  Complex types
+****************************************************************/
+typedef struct
+{
+    int deltaFindState;
+    U32 deltaNbBits;
+} FSE_symbolCompressionTransform; /* total 8 bytes */
+
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+/****************************************************************
+*  Internal functions
+****************************************************************/
+FORCE_INLINE unsigned FSE_highbit32 (U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (GCC_VERSION >= 304)   /* GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    unsigned r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
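+
+/* Note : FSE_highbit32(val) returns the position of the highest set bit,
+ * i.e. floor(log2(val)) for val > 0 : FSE_highbit32(1) == 0,
+ * FSE_highbit32(32) == 5, FSE_highbit32(255) == 7. It is used below both to
+ * derive nbBits in decoding tables and to locate a stream's stop bit. */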
+
+
+/****************************************************************
+*  Templates
+****************************************************************/
+/*
+  Designed to be included,
+  to generate type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintainability.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSE_DTableHeader;   /* sizeof U32 */
+
+static size_t FSE_buildDTable
+(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1;   /* skip the 32-bit header : dt is unsigned, so +1 advances one 32-bit cell */
+    const U32 tableSize = 1 << tableLog;
+    const U32 tableMask = tableSize-1;
+    const U32 step = FSE_tableStep(tableSize);
+    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+    U32 position = 0;
+    U32 highThreshold = tableSize-1;
+    const S16 largeLimit= (S16)(1 << (tableLog-1));
+    U32 noLarge = 1;
+    U32 s;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge;
+    if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge;
+
+    /* Init, lay down lowprob symbols */
+    DTableH[0].tableLog = (U16)tableLog;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        if (normalizedCounter[s]==-1)
+        {
+            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+            symbolNext[s] = 1;
+        }
+        else
+        {
+            if (normalizedCounter[s] >= largeLimit) noLarge=0;
+            symbolNext[s] = normalizedCounter[s];
+        }
+    }
+
+    /* Spread symbols */
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        int i;
+        for (i=0; i<normalizedCounter[s]; i++)
+        {
+            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+            position = (position + step) & tableMask;
+            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }
+    }
+
+    if (position!=0) return (size_t)-FSE_ERROR_GENERIC;   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+    /* Build Decoding table */
+    {
+        U32 i;
+        for (i=0; i<tableSize; i++)
+        {
+            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) );
+            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+        }
+    }
+
+    DTableH->fastMode = (U16)noLarge;
+    return 0;
+}
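+
+/* Worked example (editor's note) : the symbol-spreading loop above relies on
+ * FSE_tableStep(tableSize) being coprime with tableSize, so that repeatedly
+ * adding the step modulo tableSize visits every cell exactly once. For
+ * tableSize == 16, step == (16>>1)+(16>>3)+3 == 13, and the walk
+ * 0,13,10,7,4,1,14,11,8,5,2,15,12,9,6,3 covers all 16 cells before returning
+ * to 0; this is why position != 0 at the end signals a corrupted
+ * normalizedCounter. */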
+
+
+/******************************************
+*  FSE byte symbol
+******************************************/
+#ifndef FSE_COMMONDEFS_ONLY
+
+static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); }
+
+static short FSE_abs(short a)
+{
+    return a<0? -a : a;
+}
+
+
+/****************************************************************
+*  Header bitstream management
+****************************************************************/
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong;
+    bitStream = FSE_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge;
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) && (charnum<=*maxSVPtr))
+    {
+        if (previous0)
+        {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF)
+            {
+                n0+=24;
+                if (ip < iend-5)
+                {
+                    ip+=2;
+                    bitStream = FSE_readLE32(ip) >> bitCount;
+                }
+                else
+                {
+                    bitStream >>= 16;
+                    bitCount+=16;
+                }
+            }
+            while ((bitStream & 3) == 3)
+            {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall;
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+            {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = FSE_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
+        {
+            const short max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max)
+            {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            }
+            else
+            {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSE_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold)
+            {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            {
+                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+                {
+                    ip += bitCount>>3;
+                    bitCount &= 7;
+                }
+                else
+                {
+                    bitCount -= (int)(8 * (iend - 4 - ip));
+                    ip = iend - 4;
+                }
+                bitStream = FSE_readLE32(ip) >> (bitCount & 31);
+            }
+        }
+    }
+    if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC;
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+    return ip-istart;
+}
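+
+/* Note : roughly, the header layout parsed above is : 4 bits of tableLog
+ * (stored as tableLog - FSE_MIN_TABLELOG), then one normalized count per
+ * symbol, each using a variable number of bits that shrinks as `remaining`
+ * drops below `threshold`. A count of -1 (after the `count--` accuracy
+ * adjustment) marks a low-probability symbol, and a count of 0 sets previous0
+ * so that runs of zero-count symbols are coded compactly in 2-bit groups
+ * (with 0xFFFF marking long runs). */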
+
+
+/*********************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC;             /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+
+/* FSE_initDStream
+ * Initialize an FSE_DStream_t.
+ * srcBuffer must point at the beginning of an FSE block.
+ * The function result is the size of the FSE block (== srcSize).
+ * If srcSize is too small, the function will return an errorCode.
+ */
+static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong;
+
+    if (srcSize >=  sizeof(size_t))
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
+        bitD->bitContainer = FSE_readLEST(bitD->ptr);
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */
+        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
+    }
+    else
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);   /* fallthrough */
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);   /* fallthrough */
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);   /* fallthrough */
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;   /* fallthrough */
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;   /* fallthrough */
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;   /* fallthrough */
+            default:;
+        }
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC;   /* stop bit not present */
+        bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
+        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+    }
+
+    return srcSize;
+}
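+
+/* Note : the bit stream is consumed *backward* : bitD->ptr starts at the last
+ * sizeof(size_t) bytes of the buffer, and the highest set bit of the final
+ * byte is a stop bit marking where the encoder finished, hence
+ * bitsConsumed = 8 - FSE_highbit32(lastByte). A final byte of 0 carries no
+ * stop bit and is rejected as corrupted input. */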
+
+
+/*!FSE_lookBits
+ * Provides the next n bits from the bitContainer.
+ * bitContainer is not modified (the bits remain present for the next read/look).
+ * On 32-bit systems, maxNbBits==25.
+ * On 64-bit systems, maxNbBits==57.
+ * return : value extracted.
+ */
+static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+
+/*!FSE_readBits
+ * Read the next n bits from the bitContainer.
+ * On 32-bit systems, don't read more than maxNbBits==25.
+ * On 64-bit systems, don't read more than maxNbBits==57.
+ * Use the fast variant *only* if n >= 1.
+ * return : value extracted.
+ */
+static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = FSE_lookBits(bitD, nbBits);
+    FSE_skipBits(bitD, nbBits);
+    return value;
+}
+
+static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits)   /* only if nbBits >= 1 !! */
+{
+    size_t value = FSE_lookBitsFast(bitD, nbBits);
+    FSE_skipBits(bitD, nbBits);
+    return value;
+}
+
+static unsigned FSE_reloadDStream(FSE_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
+        return FSE_DStream_tooFar;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+    {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = FSE_readLEST(bitD->ptr);
+        return FSE_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start)
+    {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer;
+        return FSE_DStream_completed;
+    }
+    {
+        U32 nbBytes = bitD->bitsConsumed >> 3;
+        U32 result = FSE_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start)
+        {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = FSE_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = FSE_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
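+
+/* Note on the reload results above : FSE_DStream_unfinished means the
+ * container was fully refilled from fresh bytes; FSE_DStream_endOfBuffer means
+ * the read pointer reached the start of the buffer and only a partial refill
+ * was possible; FSE_DStream_completed means the stream sits exactly at its
+ * start with all bits consumed; FSE_DStream_tooFar signals over-consumption,
+ * i.e. corrupted input. */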
+
+
+static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
+    DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog);
+    FSE_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32  nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = FSE_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32 nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = FSE_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+/* FSE_endOfDStream
+   Tells whether bitD has reached the end of the bitStream */
+
+static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD)
+{
+    return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8));
+}
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSE_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    FSE_DStream_t bitD;
+    FSE_DState_t state1;
+    FSE_DState_t state2;
+    size_t errorCode;
+
+    /* Init */
+    errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize);   /* last arg replaced by maxCompressedSize */
+    if (FSE_isError(errorCode)) return errorCode;
+
+    FSE_initDState(&state1, &bitD, dt);
+    FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+    /* 4 symbols per loop */
+    for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4)
+    {
+        op[0] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            FSE_reloadDStream(&bitD);
+
+        op[1] = FSE_GETSYMBOL(&state2);
+
+        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            FSE_reloadDStream(&bitD);
+
+        op[3] = FSE_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_endOfBuffer here; the loop ends at exactly FSE_DStream_completed */
+    while (1)
+    {
+        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state1);
+
+        if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state2);
+    }
+
+    /* end ? */
+    if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+        return op-ostart;
+
+    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */
+
+    return (size_t)-FSE_ERROR_corruptionDetected;
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    memcpy(&DTableH, dt, sizeof(DTableH));   /* memcpy() into local variable, to avoid strict aliasing warning */
+
+    /* select fast mode (static) */
+    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSE_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    size_t errorCode;
+
+    if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */
+
+    /* normal FSE decoding mode */
+    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSE_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;   /* too small input size */
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+    if (FSE_isError(errorCode)) return errorCode;
+
+    /* always return, even if it is an error code */
+    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
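+
+/* Editor's sketch (illustrative only, excluded from the build) : the same
+ * three-step pipeline as FSE_decompress above, spelled out step by step; the
+ * function name is hypothetical. */
+#if 0
+static size_t FSE_decodePipeline_example(void* dst, size_t dstCapacity,
+                                         const void* src, size_t srcSize)
+{
+    short counts[FSE_MAX_SYMBOL_VALUE+1];
+    DTable_max_t table;
+    unsigned tableLog;
+    unsigned maxSymbol = FSE_MAX_SYMBOL_VALUE;
+    size_t hSize, errorCode;
+
+    /* 1) parse the normalized-count header */
+    hSize = FSE_readNCount(counts, &maxSymbol, &tableLog, src, srcSize);
+    if (FSE_isError(hSize)) return hSize;
+
+    /* 2) build the decoding table from the counts */
+    errorCode = FSE_buildDTable(table, counts, maxSymbol, tableLog);
+    if (FSE_isError(errorCode)) return errorCode;
+
+    /* 3) decode the payload that follows the header */
+    return FSE_decompress_usingDTable(dst, dstCapacity,
+                                      (const BYTE*)src + hSize, srcSize - hSize, table);
+}
+#endif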
+
+
+
+/* *******************************************************
+*  Huff0 : Huffman block compression
+*********************************************************/
+#define HUF_MAX_SYMBOL_VALUE 255
+#define HUF_DEFAULT_TABLELOG  12       /* used by default, when not specified */
+#define HUF_MAX_TABLELOG  12           /* max possible tableLog; for allocation purpose; can be modified */
+#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+#  error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+typedef struct HUF_CElt_s {
+  U16  val;
+  BYTE nbBits;
+} HUF_CElt ;
+
+typedef struct nodeElt_s {
+    U32 count;
+    U16 parent;
+    BYTE byte;
+    BYTE nbBits;
+} nodeElt;
+
+
+/* *******************************************************
+*  Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct {
+    BYTE byte;
+    BYTE nbBits;
+} HUF_DElt;
+
+static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];  /* large enough for values from 0 to 16 */
+    U32 weightTotal;
+    U32 maxBits;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+    U32 n;
+    U32 nextRankStart;
+    void* ptr = DTable+1;
+    HUF_DElt* const dt = (HUF_DElt*)ptr;
+
+    if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+    iSize = ip[0];
+
+    FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16));   /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* should not be necessary, but some analyzers complain ... */
+    if (iSize >= 128)  /* special header */
+    {
+        if (iSize >= (242))   /* RLE */
+        {
+            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, sizeof(huffWeight));
+            iSize = 0;
+        }
+        else   /* Incompressible */
+        {
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+            ip += 1;
+            for (n=0; n<oSize; n+=2)
+            {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+            }
+        }
+    }
+    else  /* header compressed with FSE (normal case) */
+    {
+        if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+        oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize);   /* max 255 values decoded, last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankVal, 0, sizeof(rankVal));
+    weightTotal = 0;
+    for (n=0; n<oSize; n++)
+    {
+        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected;
+        rankVal[huffWeight[n]]++;
+        weightTotal += (1 << huffWeight[n]) >> 1;
+    }
+    if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected;
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    maxBits = FSE_highbit32(weightTotal) + 1;
+    if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge;   /* DTable is too small */
+    DTable[0] = (U16)maxBits;
+    {
+        U32 total = 1 << maxBits;
+        U32 rest = total - weightTotal;
+        U32 verif = 1 << FSE_highbit32(rest);
+        U32 lastWeight = FSE_highbit32(rest) + 1;
+        if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected;    /* last value must be a clean power of 2 */
+        huffWeight[oSize] = (BYTE)lastWeight;
+        rankVal[lastWeight]++;
+    }
+
+    /* check tree construction validity */
+    if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected;   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* Prepare ranks */
+    nextRankStart = 0;
+    for (n=1; n<=maxBits; n++)
+    {
+        U32 current = nextRankStart;
+        nextRankStart += (rankVal[n] << (n-1));
+        rankVal[n] = current;
+    }
+
+    /* fill DTable */
+    for (n=0; n<=oSize; n++)
+    {
+        const U32 w = huffWeight[n];
+        const U32 length = (1 << w) >> 1;
+        U32 i;
+        HUF_DElt D;
+        D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w);
+        for (i = rankVal[w]; i < rankVal[w] + length; i++)
+            dt[i] = D;
+        rankVal[w] += length;
+    }
+
+    return iSize+1;
+}
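+
+/* Worked example (editor's note) : weights are exponents; a weight w
+ * contributes (1<<w)>>1 to weightTotal, and a symbol of weight w decodes with
+ * maxBits+1-w bits. The last weight is never stored : it is implied by the
+ * requirement that the total round up to a clean power of 2. E.g. if the
+ * stored weights sum to 6, maxBits == FSE_highbit32(6)+1 == 3, total == 8,
+ * rest == 2, so the implied last weight is FSE_highbit32(2)+1 == 2. */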
+
+
+static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog)
+{
+        const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+        const BYTE c = dt[val].byte;
+        FSE_skipBits(Dstream, dt[val].nbBits);
+        return c;
+}
+
+static size_t HUF_decompress_usingDTable(   /* -3% slower when non static */
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-15;
+
+    const void* ptr = DTable;
+    const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
+    const U32 dtLog = DTable[0];
+    size_t errorCode;
+    U32 reloadStatus;
+
+    /* Init */
+
+    const U16* jumpTable = (const U16*)cSrc;
+    const size_t length1 = FSE_readLE16(jumpTable);
+    const size_t length2 = FSE_readLE16(jumpTable+1);
+    const size_t length3 = FSE_readLE16(jumpTable+2);
+    const size_t length4 = cSrcSize - 6 - length1 - length2 - length3;   /* coherency checked below */
+    const char* const start1 = (const char*)(cSrc) + 6;
+    const char* const start2 = start1 + length1;
+    const char* const start3 = start2 + length2;
+    const char* const start4 = start3 + length3;
+    FSE_DStream_t bitD1, bitD2, bitD3, bitD4;
+
+    if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+
+    errorCode = FSE_initDStream(&bitD1, start1, length1);
+    if (FSE_isError(errorCode)) return errorCode;
+    errorCode = FSE_initDStream(&bitD2, start2, length2);
+    if (FSE_isError(errorCode)) return errorCode;
+    errorCode = FSE_initDStream(&bitD3, start3, length3);
+    if (FSE_isError(errorCode)) return errorCode;
+    errorCode = FSE_initDStream(&bitD4, start4, length4);
+    if (FSE_isError(errorCode)) return errorCode;
+
+    reloadStatus=FSE_reloadDStream(&bitD2);
+
+    /* 16 symbols per loop */
+    for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit);  /* D2-3-4 are supposed to be synchronized and finish together */
+        op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
+    {
+#define HUF_DECODE_SYMBOL_0(n, Dstream) \
+        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);
+
+#define HUF_DECODE_SYMBOL_1(n, Dstream) \
+        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
+        if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)
+
+#define HUF_DECODE_SYMBOL_2(n, Dstream) \
+        op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
+        if (FSE_32bits()) FSE_reloadDStream(&Dstream)
+
+        HUF_DECODE_SYMBOL_1( 0, bitD1);
+        HUF_DECODE_SYMBOL_1( 1, bitD2);
+        HUF_DECODE_SYMBOL_1( 2, bitD3);
+        HUF_DECODE_SYMBOL_1( 3, bitD4);
+        HUF_DECODE_SYMBOL_2( 4, bitD1);
+        HUF_DECODE_SYMBOL_2( 5, bitD2);
+        HUF_DECODE_SYMBOL_2( 6, bitD3);
+        HUF_DECODE_SYMBOL_2( 7, bitD4);
+        HUF_DECODE_SYMBOL_1( 8, bitD1);
+        HUF_DECODE_SYMBOL_1( 9, bitD2);
+        HUF_DECODE_SYMBOL_1(10, bitD3);
+        HUF_DECODE_SYMBOL_1(11, bitD4);
+        HUF_DECODE_SYMBOL_0(12, bitD1);
+        HUF_DECODE_SYMBOL_0(13, bitD2);
+        HUF_DECODE_SYMBOL_0(14, bitD3);
+        HUF_DECODE_SYMBOL_0(15, bitD4);
+    }
+
+    if (reloadStatus!=FSE_DStream_completed)   /* not complete : some bitStream might be FSE_DStream_unfinished */
+        return (size_t)-FSE_ERROR_corruptionDetected;
+
+    /* tail */
+    {
+        // bitTail = bitD1;   // *much* slower : -20% !??!
+        FSE_DStream_t bitTail;
+        bitTail.ptr = bitD1.ptr;
+        bitTail.bitsConsumed = bitD1.bitsConsumed;
+        bitTail.bitContainer = bitD1.bitContainer;   // required in case of FSE_DStream_endOfBuffer
+        bitTail.start = start1;
+        for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
+        {
+            HUF_DECODE_SYMBOL_0(0, bitTail);
+        }
+
+        if (FSE_endOfDStream(&bitTail))
+            return op-ostart;
+    }
+
+    if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall;   /* dst buffer is full, but cSrc unfinished */
+
+    return (size_t)-FSE_ERROR_corruptionDetected;
+}
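+
+/* Note : the compressed block is split into 4 independent bit streams so the
+ * hot loop can decode 16 symbols per iteration (4 per stream), hiding load and
+ * table-lookup latencies. The 6-byte jump table at the start stores the sizes
+ * of streams 1-3 as three little-endian U16 values; stream 4's size is
+ * whatever remains. */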
+
+
+static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+    size_t errorCode;
+
+    errorCode = HUF_readDTable (DTable, cSrc, cSrcSize);
+    if (FSE_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable);
+}
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
+
+/*
+    zstd - standard compression library
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/****************************************************************
+*  Tuning parameters
+*****************************************************************/
+/* MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio.
+*  Reducing memory usage can improve speed, due to cache effects */
+#define ZSTD_MEMORY_USAGE 17
+
+
+/**************************************
+   CPU Feature Detection
+**************************************/
+/*
+ * Automated efficient unaligned memory access detection
+ * Based on known hardware architectures
+ * This list will be updated thanks to feedback
+ */
+#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
+    || defined(__ARM_FEATURE_UNALIGNED) \
+    || defined(__i386__) || defined(__x86_64__) \
+    || defined(_M_IX86) || defined(_M_X64) \
+    || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
+    || (defined(_M_ARM) && (_M_ARM >= 7))
+#  define ZSTD_UNALIGNED_ACCESS 1
+#else
+#  define ZSTD_UNALIGNED_ACCESS 0
+#endif
+
+
+/********************************************************
+*  Includes
+*********************************************************/
+#include <stdlib.h>      /* calloc */
+#include <string.h>      /* memcpy, memmove */
+#include <stdio.h>       /* debug : printf */
+
+
+/********************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+#  include <immintrin.h>   /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#endif
+
+
+#ifndef MEM_ACCESS_MODULE
+#define MEM_ACCESS_MODULE
+/********************************************************
+*  Basic Types
+*********************************************************/
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+# include <stdint.h>
+typedef  uint8_t BYTE;
+typedef uint16_t U16;
+typedef  int16_t S16;
+typedef uint32_t U32;
+typedef  int32_t S32;
+typedef uint64_t U64;
+#else
+typedef unsigned char       BYTE;
+typedef unsigned short      U16;
+typedef   signed short      S16;
+typedef unsigned int        U32;
+typedef   signed int        S32;
+typedef unsigned long long  U64;
+#endif
+
+#endif   /* MEM_ACCESS_MODULE */
+
+
+/********************************************************
+*  Constants
+*********************************************************/
+static const U32 ZSTD_magicNumber = 0xFD2FB51E;   /* 3rd version : seqNb header */
+
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
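+
+/* Note : KB/MB/GB are postfix multiplier macros, so `128 KB` expands to
+*  `128 *(1 <<10)` == 131072. BLOCKSIZE is therefore 128 KiB, and
+*  WORKPLACESIZE covers three such blocks (384 KiB). */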
+#define MINMATCH 4
+#define MLbits   7
+#define LLbits   6
+#define Offbits  5
+#define MaxML  ((1<<MLbits )-1)
+#define MaxLL  ((1<<LLbits )-1)
+#define MaxOff ((1<<Offbits)-1)
+#define LitFSELog  11
+#define MLFSELog   10
+#define LLFSELog   10
+#define OffFSELog   9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7   /* to remove */
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/********************************************************
+*  Memory operations
+*********************************************************/
+static unsigned ZSTD_32bits(void) { return sizeof(void*)==4; }
+
+static unsigned ZSTD_isLittleEndian(void)
+{
+    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+static U16    ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }
+
+static U32    ZSTD_read32(const void* p) { U32 r; memcpy(&r, p, sizeof(r)); return r; }
+
+static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s)    { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    while (op < oend) COPY8(op, ip);
+}
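+
+/* Note : ZSTD_wildcopy copies in 8-byte strides and may write up to 7 bytes
+ * past dst+length; callers must guarantee that slack exists (see the `oend-8`
+ * checks in ZSTD_execSequence below). */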
+
+static U16 ZSTD_readLE16(const void* memPtr)
+{
+    if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)((U16)p[0] + ((U16)p[1]<<8));
+    }
+}
+
+
+static U32 ZSTD_readLE32(const void* memPtr)
+{
+    if (ZSTD_isLittleEndian())
+        return ZSTD_read32(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+    }
+}
+
+static U32 ZSTD_readBE32(const void* memPtr)
+{
+    const BYTE* p = (const BYTE*)memPtr;
+    return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + ((U32)p[3]<<0));
+}
+
+
+/**************************************
+*  Local structures
+***************************************/
+typedef struct ZSTD_Cctx_s ZSTD_Cctx;
+
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+typedef struct {
+    void* buffer;
+    U32*  offsetStart;
+    U32*  offset;
+    BYTE* offCodeStart;
+    BYTE* offCode;
+    BYTE* litStart;
+    BYTE* lit;
+    BYTE* litLengthStart;
+    BYTE* litLength;
+    BYTE* matchLengthStart;
+    BYTE* matchLength;
+    BYTE* dumpsStart;
+    BYTE* dumps;
+} seqStore_t;
+
+
+typedef struct ZSTD_Cctx_s
+{
+    const BYTE* base;
+    U32 current;
+    U32 nextUpdate;
+    seqStore_t seqStore;
+#ifdef __AVX2__
+    __m256i hashTable[HASH_TABLESIZE>>3];
+#else
+    U32 hashTable[HASH_TABLESIZE];
+#endif
+    BYTE buffer[WORKPLACESIZE];
+} cctxi_t;
+
+
+
+
+/**************************************
+*  Error Management
+**************************************/
+/* published entry point */
+unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); }
+
+
+/**************************************
+*  Tool functions
+**************************************/
+#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
+#define ZSTD_VERSION_MINOR    1    /* for new (non-breaking) interface capabilities */
+#define ZSTD_VERSION_RELEASE  3    /* for tweaks, bug-fixes, or development */
+#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+/**************************************************************
+*   Decompression code
+**************************************************************/
+
+size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    BYTE headerFlags;
+    U32 cSize;
+
+    if (srcSize < 3) return ERROR(srcSize_wrong);
+
+    headerFlags = *in;
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
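+
+/* Worked example (editor's note) : the 3-byte block header packs the block
+ * type into the top 2 bits of byte 0 and a 19-bit size across the remaining
+ * bits : cSize = ((in[0]&7)<<16) + (in[1]<<8) + in[2]. E.g. the header bytes
+ * 0x40 0x01 0x00 describe blockType 1 (bt_raw) with cSize == 256. */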
+
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+static size_t ZSTD_decompressLiterals(void* ctx,
+                                      void* dst, size_t maxDstSize,
+                                const void* src, size_t srcSize)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + maxDstSize;
+    const BYTE* ip = (const BYTE*)src;
+    size_t errorCode;
+    size_t litSize;
+
+    /* check : minimum 2, for litSize, +1, for content */
+    if (srcSize <= 3) return ERROR(corruption_detected);
+
+    litSize = ip[1] + (ip[0]<<8);
+    litSize += ((ip[-3] >> 3) & 7) << 16;   /* bits 3-5 of the block header's first byte hold litSize's high bits */
+    op = oend - litSize;
+
+    (void)ctx;
+    if (litSize > maxDstSize) return ERROR(dstSize_tooSmall);
+    errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2);
+    if (FSE_isError(errorCode)) return ERROR(GENERIC);
+    return litSize;
+}
+
+
+size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
+                                void* dst, size_t maxDstSize,
+                          const BYTE** litStart, size_t* litSize,
+                          const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* ip = istart;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    blockProperties_t litbp;
+
+    size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp);
+    if (ZSTDv01_isError(litcSize)) return litcSize;
+    if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    ip += ZSTD_blockHeaderSize;
+
+    switch(litbp.blockType)
+    {
+    case bt_raw:
+        *litStart = ip;
+        ip += litcSize;
+        *litSize = litcSize;
+        break;
+    case bt_rle:
+        {
+            size_t rleSize = litbp.origSize;
+            if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);
+            if (!srcSize) return ERROR(srcSize_wrong);
+            memset(oend - rleSize, *ip, rleSize);
+            *litStart = oend - rleSize;
+            *litSize = rleSize;
+            ip++;
+            break;
+        }
+    case bt_compressed:
+        {
+            size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize);
+            if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize;
+            *litStart = oend - decodedLitSize;
+            *litSize = decodedLitSize;
+            ip += litcSize;
+            break;
+        }
+    case bt_end:
+    default:
+        return ERROR(GENERIC);
+    }
+
+    return ip-istart;
+}
+
+
+size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+                         const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* ip = istart;
+    const BYTE* const iend = istart + srcSize;
+    U32 LLtype, Offtype, MLtype;
+    U32 LLlog, Offlog, MLlog;
+    size_t dumpsLength;
+
+    /* check */
+    if (srcSize < 5) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    *nbSeq = ZSTD_readLE16(ip); ip+=2;
+    LLtype  = *ip >> 6;
+    Offtype = (*ip >> 4) & 3;
+    MLtype  = (*ip >> 2) & 3;
+    if (*ip & 2)
+    {
+        dumpsLength  = ip[2];
+        dumpsLength += ip[1] << 8;
+        ip += 3;
+    }
+    else
+    {
+        dumpsLength  = ip[1];
+        dumpsLength += (ip[0] & 1) << 8;
+        ip += 2;
+    }
+    *dumpsPtr = ip;
+    ip += dumpsLength;
+    *dumpsLengthPtr = dumpsLength;
+
+    /* check */
+    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+    /* sequences */
+    {
+        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
+        size_t headerSize;
+
+        /* Build DTables */
+        switch(LLtype)
+        {
+        case bt_rle :
+            LLlog = 0;
+            FSE_buildDTable_rle(DTableLL, *ip++); break;
+        case bt_raw :
+            LLlog = LLbits;
+            FSE_buildDTable_raw(DTableLL, LLbits); break;
+        default :
+            {   U32 max = MaxLL;
+                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (LLlog > LLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableLL, norm, max, LLlog);
+        }   }
+
+        switch(Offtype)
+        {
+        case bt_rle :
+            Offlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableOffb, *ip++); break;
+        case bt_raw :
+            Offlog = Offbits;
+            FSE_buildDTable_raw(DTableOffb, Offbits); break;
+        default :
+            {   U32 max = MaxOff;
+                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (Offlog > OffFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableOffb, norm, max, Offlog);
+        }   }
+
+        switch(MLtype)
+        {
+        case bt_rle :
+            MLlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableML, *ip++); break;
+        case bt_raw :
+            MLlog = MLbits;
+            FSE_buildDTable_raw(DTableML, MLbits); break;
+        default :
+            {   U32 max = MaxML;
+                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (MLlog > MLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableML, norm, max, MLlog);
+    }   }   }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t offset;
+    size_t matchLength;
+} seq_t;
+
+typedef struct {
+    FSE_DStream_t DStream;
+    FSE_DState_t stateLL;
+    FSE_DState_t stateOffb;
+    FSE_DState_t stateML;
+    size_t prevOffset;
+    const BYTE* dumps;
+    const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+    size_t litLength;
+    size_t prevOffset;
+    size_t offset;
+    size_t matchLength;
+    const BYTE* dumps = seqState->dumps;
+    const BYTE* const de = seqState->dumpsEnd;
+
+    /* Literal length */
+    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+    prevOffset = litLength ? seq->offset : seqState->prevOffset;
+    seqState->prevOffset = seq->offset;
+    if (litLength == MaxLL)
+    {
+        U32 add = dumps<de ? *dumps++ : 0;
+        if (add < 255) litLength += add;
+        else
+        {
+            if (dumps<=(de-3))
+            {
+                litLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no problem : dumps is always followed by seq tables, > 1 byte */
+                dumps += 3;
+            }
+        }
+    }
+
+    /* Offset */
+    {
+        U32 offsetCode, nbBits;
+        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));
+        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
+        nbBits = offsetCode - 1;
+        if (offsetCode==0) nbBits = 0;   /* cmove */
+        offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits);
+        if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
+        if (offsetCode==0) offset = prevOffset;
+    }
+
+    /* MatchLength */
+    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+    if (matchLength == MaxML)
+    {
+        U32 add = dumps<de ? *dumps++ : 0;
+        if (add < 255) matchLength += add;
+        else
+        {
+            if (dumps<=(de-3))
+            {
+                matchLength = ZSTD_readLE32(dumps) & 0xFFFFFF;  /* no problem : dumps is always followed by seq tables, > 1 byte */
+                dumps += 3;
+            }
+        }
+    }
+    matchLength += MINMATCH;
+
+    /* save result */
+    seq->litLength = litLength;
+    seq->offset = offset;
+    seq->matchLength = matchLength;
+    seqState->dumps = dumps;
+}
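+
+/* Note : the lengths above use an escape scheme. The FSE symbol codes values
+ * below MaxLL/MaxML directly; at the maximum value, one extra byte is pulled
+ * from `dumps`, and if that byte is 255 the true length is instead a 3-byte
+ * little-endian value read from `dumps`. An offsetCode of 0 means "repeat
+ * prevOffset". */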
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+                                seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                BYTE* const base, BYTE* const oend)
+{
+    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
+    const BYTE* const ostart = op;
+    const size_t litLength = sequence.litLength;
+    BYTE* const endMatch = op + litLength + sequence.matchLength;    /* risk : address space overflow (32-bits) */
+    const BYTE* const litEnd = *litPtr + litLength;
+
+    /* check */
+    if (endMatch > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
+    if (litEnd > litLimit) return ERROR(corruption_detected);
+    if (sequence.matchLength > (size_t)(*litPtr-op))  return ERROR(dstSize_tooSmall);    /* overwrite literal segment */
+
+    /* copy Literals */
+    if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))
+        memmove(op, *litPtr, litLength);   /* overwrite risk */
+    else
+        ZSTD_wildcopy(op, *litPtr, litLength);
+    op += litLength;
+    *litPtr = litEnd;   /* update for next sequence */
+
+    /* check : last match must be at a minimum distance of 8 from end of dest buffer */
+    if (oend-op < 8) return ERROR(dstSize_tooSmall);
+
+    /* copy Match */
+    {
+        const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12);
+        const BYTE* match = op - sequence.offset;            /* possible underflow at op - offset ? */
+        size_t qutt = 12;
+        U64 saved[2];
+
+        /* check */
+        if (match < base) return ERROR(corruption_detected);
+        if (sequence.offset > (size_t)base) return ERROR(corruption_detected);
+
+        /* save beginning of literal sequence, in case of write overlap */
+        if (overlapRisk)
+        {
+            if ((endMatch + qutt) > oend) qutt = oend-endMatch;
+            memcpy(saved, endMatch, qutt);
+        }
+
+        if (sequence.offset < 8)
+        {
+            const int dec64 = dec64table[sequence.offset];
+            op[0] = match[0];
+            op[1] = match[1];
+            op[2] = match[2];
+            op[3] = match[3];
+            match += dec32table[sequence.offset];
+            ZSTD_copy4(op+4, match);
+            match -= dec64;
+        } else { ZSTD_copy8(op, match); }
+        op += 8; match += 8;
+
+        if (endMatch > oend-(16-MINMATCH))
+        {
+            if (op < oend-8)
+            {
+                ZSTD_wildcopy(op, match, (oend-8) - op);
+                match += (oend-8) - op;
+                op = oend-8;
+            }
+            while (op<endMatch) *op++ = *match++;
+        }
+        else
+            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+
+        /* restore, in case of overlap */
+        if (overlapRisk) memcpy(endMatch, saved, qutt);
+    }
+
+    return endMatch-ostart;
+}
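+
+/* Note : for offsets < 8 a plain 8-byte copy would overlap its own output, so
+ * the code above copies the first 4 bytes one at a time, advances `match` by
+ * dec32table[offset] so the next 4-byte copy may re-read freshly written
+ * bytes, then rewinds it by dec64table[offset] before the wide-copy loop
+ * takes over. */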
+
+typedef struct ZSTDv01_Dctx_s
+{
+    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+    void* previousDstEnd;
+    void* base;
+    size_t expected;
+    blockType_t bType;
+    U32 phase;
+} dctx_t;
+
+
+static size_t ZSTD_decompressSequences(
+                               void* ctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize,
+                         const BYTE* litStart, size_t litSize)
+{
+    dctx_t* dctx = (dctx_t*)ctx;
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t errorCode, dumpsLength;
+    const BYTE* litPtr = litStart;
+    const BYTE* const litEnd = litStart + litSize;
+    int nbSeq;
+    const BYTE* dumps;
+    U32* DTableLL = dctx->LLTable;
+    U32* DTableML = dctx->MLTable;
+    U32* DTableOffb = dctx->OffTable;
+    BYTE* const base = (BYTE*) (dctx->base);
+
+    /* Build Decoding Tables */
+    errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+                                      DTableLL, DTableML, DTableOffb,
+                                      ip, iend-ip);
+    if (ZSTDv01_isError(errorCode)) return errorCode;
+    ip += errorCode;
+
+    /* Regen sequences */
+    {
+        seq_t sequence;
+        seqState_t seqState;
+
+        memset(&sequence, 0, sizeof(sequence));
+        seqState.dumps = dumps;
+        seqState.dumpsEnd = dumps + dumpsLength;
+        seqState.prevOffset = 1;
+        errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip);
+        if (FSE_isError(errorCode)) return ERROR(corruption_detected);
+        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; )
+        {
+            size_t oneSeqSize;
+            nbSeq--;
+            ZSTD_decodeSequence(&sequence, &seqState);
+            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
+            if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* check if reached exact end */
+        if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
+        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */
+
+        /* last literal segment */
+        {
+            size_t lastLLSize = litEnd - litPtr;
+            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+            if (op != litPtr) memmove(op, litPtr, lastLLSize);
+            op += lastLLSize;
+        }
+    }
+
+    return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+                            void* ctx,
+                            void* dst, size_t maxDstSize,
+                      const void* src, size_t srcSize)
+{
+    /* blockType == blockCompressed, srcSize is trusted */
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* litPtr = NULL;
+    size_t litSize = 0;
+    size_t errorCode;
+
+    /* Decode literals sub-block */
+    errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize);
+    if (ZSTDv01_isError(errorCode)) return errorCode;
+    ip += errorCode;
+    srcSize -= errorCode;
+
+    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize);
+}
+
+
+size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t remainingSize = srcSize;
+    U32 magicNumber;
+    size_t errorCode=0;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    magicNumber = ZSTD_readBE32(src);
+    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTDv01_isError(blockSize)) return blockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (blockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize);
+            break;
+        case bt_raw :
+            errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet supported */
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            break;
+        default:
+            return ERROR(GENERIC);
+        }
+        if (blockSize == 0) break;   /* bt_end */
+
+        if (ZSTDv01_isError(errorCode)) return errorCode;
+        op += errorCode;
+        ip += blockSize;
+        remainingSize -= blockSize;
+    }
+
+    return op-ostart;
+}
+
+size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    dctx_t ctx;
+    ctx.base = dst;
+    return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+    U32 magicNumber;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    magicNumber = ZSTD_readBE32(src);
+    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTDv01_isError(blockSize)) return blockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (blockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        if (blockSize == 0) break;   /* bt_end */
+
+        ip += blockSize;
+        remainingSize -= blockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+/*******************************
+*  Streaming Decompression API
+*******************************/
+
+size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx)
+{
+    dctx->expected = ZSTD_frameHeaderSize;
+    dctx->phase = 0;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    return 0;
+}
+
+ZSTDv01_Dctx* ZSTDv01_createDCtx(void)
+{
+    ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx));
+    if (dctx==NULL) return NULL;
+    ZSTDv01_resetDCtx(dctx);
+    return dctx;
+}
+
+size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx)
+{
+    free(dctx);
+    return 0;
+}
+
+size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx)
+{
+    return ((dctx_t*)dctx)->expected;
+}
+
+size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    dctx_t* ctx = (dctx_t*)dctx;
+
+    /* Sanity check */
+    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+    if (dst != ctx->previousDstEnd)  /* not contiguous */
+        ctx->base = dst;
+
+    /* Decompress : frame header */
+    if (ctx->phase == 0)
+    {
+        /* Check frame magic header */
+        U32 magicNumber = ZSTD_readBE32(src);
+        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+        ctx->phase = 1;
+        ctx->expected = ZSTD_blockHeaderSize;
+        return 0;
+    }
+
+    /* Decompress : block header */
+    if (ctx->phase == 1)
+    {
+        blockProperties_t bp;
+        size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+        if (ZSTDv01_isError(blockSize)) return blockSize;
+        if (bp.blockType == bt_end)
+        {
+            ctx->expected = 0;
+            ctx->phase = 0;
+        }
+        else
+        {
+            ctx->expected = blockSize;
+            ctx->bType = bp.blockType;
+            ctx->phase = 2;
+        }
+
+        return 0;
+    }
+
+    /* Decompress : block content */
+    {
+        size_t rSize;
+        switch(ctx->bType)
+        {
+        case bt_compressed:
+            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+            break;
+        case bt_raw :
+            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet handled */
+            break;
+        case bt_end :   /* should never happen (filtered at phase 1) */
+            rSize = 0;
+            break;
+        default:
+            return ERROR(GENERIC);
+        }
+        ctx->phase = 1;
+        ctx->expected = ZSTD_blockHeaderSize;
+        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+        return rSize;
+    }
+
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v01.h b/vendor/github.com/DataDog/zstd/zstd_v01.h
new file mode 100644
index 0000000..42f0897
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v01.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V01_H_28739879432
+#define ZSTD_V01_H_28739879432
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stddef.h>   /* size_t */
+
+
+/* *************************************
+*  Simple one-step function
+***************************************/
+/**
+ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format
+    compressedSize : is the exact source size
+    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
+                      It must be equal to or larger than originalSize, otherwise decompression will fail.
+    return : the number of bytes decompressed into destination buffer (originalSize)
+             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
+*/
+size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
+                     const void* src, size_t compressedSize);
+
+/**
+ZSTDv01_findFrameCompressedSize() : get the compressed size of a ZSTD frame compliant with v0.1.x format
+    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame
+             or an errorCode if it fails (which can be tested using ZSTDv01_isError())
+*/
+size_t ZSTDv01_findFrameCompressedSize(const void* src, size_t compressedSize);
+
+/**
+ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
+*/
+unsigned ZSTDv01_isError(size_t code);
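For orientation, here is a minimal caller-side sketch of the one-step API above. It is not part of the vendored header; the input buffer and the upper bound on the regenerated size are assumed to be supplied by the caller.

```c
#include <stdio.h>
#include <stdlib.h>
#include "zstd_v01.h"

/* Sketch only: decompress one legacy v0.1 frame held in `src`.
   `maxOriginalSize` is an assumed caller-known upper bound on the
   regenerated size, as required by ZSTDv01_decompress(). */
static void decompressFrameExample(const void* src, size_t compressedSize,
                                   size_t maxOriginalSize)
{
    void* const dst = malloc(maxOriginalSize);
    if (dst == NULL) return;
    {
        size_t const r = ZSTDv01_decompress(dst, maxOriginalSize, src, compressedSize);
        if (ZSTDv01_isError(r))
            fprintf(stderr, "v0.1 frame decompression failed\n");
        else
            printf("regenerated %u bytes\n", (unsigned)r);
    }
    free(dst);
}
```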
+
+
+/* *************************************
+*  Advanced functions
+***************************************/
+typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
+ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
+size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);
+
+size_t ZSTDv01_decompressDCtx(void* ctx,
+                              void* dst, size_t maxOriginalSize,
+                        const void* src, size_t compressedSize);
+
+/* *************************************
+*  Streaming functions
+***************************************/
+size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);
+
+size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
+size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions in alternation.
+  ZSTDv01_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv01_decompressContinue().
+  ZSTDv01_decompressContinue() will use previously decoded data blocks when they are located prior to the current block in memory.
+  Result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTDv01_decompressContinue() has decoded some header.
+*/
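The alternating protocol described above can be driven by a small loop. The following is a sketch under simplifying assumptions (single frame, output fits in one contiguous buffer, input read from a `FILE*`); the buffer sizes and I/O handling are illustrative, not prescribed by the API.

```c
#include <stdio.h>
#include "zstd_v01.h"

/* Sketch of the streaming protocol: ask the decoder how many bytes it
   expects, feed exactly that many, collect whatever it regenerates.
   A step may regenerate 0 bytes (header decoding), which is not an error. */
static size_t streamDecompressExample(FILE* in, char* dst, size_t dstCapacity)
{
    ZSTDv01_Dctx* const dctx = ZSTDv01_createDCtx();
    static char inBuff[1 << 17];   /* assumed >= any single step's request */
    size_t dstPos = 0;
    if (dctx == NULL) return 0;
    for (;;)
    {
        size_t const toRead = ZSTDv01_nextSrcSizeToDecompress(dctx);
        size_t regenerated;
        if (toRead == 0) break;                               /* frame done */
        if (toRead > sizeof(inBuff)) break;                   /* sketch guard */
        if (fread(inBuff, 1, toRead, in) != toRead) break;    /* truncated */
        regenerated = ZSTDv01_decompressContinue(dctx, dst + dstPos,
                                                 dstCapacity - dstPos,
                                                 inBuff, toRead);
        if (ZSTDv01_isError(regenerated)) break;
        dstPos += regenerated;   /* dst stays contiguous across calls */
    }
    ZSTDv01_freeDCtx(dctx);
    return dstPos;
}
```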
+
+/* *************************************
+*  Prefix - version detection
+***************************************/
+#define ZSTDv01_magicNumber   0xFD2FB51E   /* Big Endian version */
+#define ZSTDv01_magicNumberLE 0x1EB52FFD   /* Little Endian version */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V01_H_28739879432 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.c b/vendor/github.com/DataDog/zstd/zstd_v02.c
new file mode 100644
index 0000000..8bc0ece
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v02.c
@@ -0,0 +1,3483 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include "zstd_v02.h"
+#include "error_private.h"
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+
+
+/* ******************************************************************
+   mem.h
+   low-level memory access routines
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+*  Includes
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include <string.h>    /* memcpy */
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+#  define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/****************************************************************
+*  Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/****************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets whose generated assembly depends on alignment.
+ *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates C standard on structure alignment.
+Only use if no other choice to achieve best performance on target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian())
+    {
+        MEM_write16(memPtr, val);
+    }
+    else
+    {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+    }
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+    }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+
+/* ******************************************************************
+   bitstream
+   Part of NewGen Entropy library
+   header file (to include)
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which highly benefit from being inlined.
+*  Since link-time-optimization is not available for all compilers,
+*  these functions are defined in a .h file, to be included.
+*/
+
+
+/**********************************************
+*  bitStream decompression API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+               BIT_DStream_endOfBuffer = 1,
+               BIT_DStream_completed = 2,
+               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+/******************************************
+*  unsafe API
+******************************************/
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/****************************************************************
+*  Helper functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    unsigned r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
+
+
+
+/**********************************************************
+* bitStream decoding
+**********************************************************/
+
+/*!BIT_initDStream
+*  Initialize a BIT_DStream_t.
+*  @bitD : a pointer to an already allocated BIT_DStream_t structure
+*  @srcBuffer must point at the beginning of a bitStream
+*  @srcSize must be the exact size of the bitStream
+*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(size_t))   /* normal case */
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+    }
+    else
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);   /* fall-through */
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);   /* fall-through */
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);   /* fall-through */
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;   /* fall-through */
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;   /* fall-through */
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;   /* fall-through */
+            default:;
+        }
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BIT_lookBits(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*!BIT_readBitsFast :
+*  unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BIT_lookBitsFast(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
+        return BIT_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+    {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BIT_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start)
+    {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+        return BIT_DStream_completed;
+    }
+    {
+        U32 nbBytes = bitD->bitsConsumed >> 3;
+        BIT_DStream_status result = BIT_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start)
+        {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BIT_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BIT_endOfDStream
+*   @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+   Error codes and messages
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+#  define ERR_STATIC static __inline
+#elif defined(__GNUC__)
+#  define ERR_STATIC static __attribute__((unused))
+#else
+#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/******************************************
+*  Error Management
+******************************************/
+#define PREFIX(name) ZSTD_error_##name
+
+#define ERROR(name) (size_t)-PREFIX(name)
+
+#define ERROR_LIST(ITEM) \
+        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
+        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
+        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
+        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
+        ITEM(PREFIX(maxCode))
+
+#define ERROR_GENERATE_ENUM(ENUM) ENUM,
+typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
+
+#define ERROR_CONVERTTOSTRING(STRING) #STRING,
+#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
+static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+    static const char* codeError = "Unspecified error code";
+    if (ERR_isError(code)) return ERR_strings[-(int)(code)];
+    return codeError;
+}
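A brief illustration of the scheme above, which encodes each error as a `size_t` value near the top of the range (the negated enum), so results and errors share one return channel. The function name here is only for demonstration.

```c
/* Illustration: any result above ERROR(maxCode) is an error, and the
   negated code indexes ERR_strings for a printable name. */
static void errorSchemeExample(void)
{
    size_t const code = ERROR(corruption_detected);
    if (ERR_isError(code))
    {
        const char* const name = ERR_getErrorName(code);
        (void)name;   /* "ZSTD_error_corruption_detected" */
    }
}
```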
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
+/*
+Constructor and Destructor of type FSE_CTable
+    Note that its size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+
+
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+*  Static allocation
+******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
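As the note above says, these macros allow fully static table allocation; a minimal sketch (the table name and tableLog are illustrative):

```c
/* Sketch: a statically allocated FSE decoding table for tableLog == 12
   (1 + 2^12 unsigned cells, per FSE_DTABLE_SIZE_U32 above). */
static FSE_DTable exampleDTable[FSE_DTABLE_SIZE_U32(12)];
```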
+
+
+/******************************************
+*  FSE advanced API
+******************************************/
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/* build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+
+/******************************************
+*  FSE symbol decompression API
+******************************************/
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+
+/******************************************
+*  FSE unsafe API
+******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/******************************************
+*  Implementation of inline functions
+******************************************/
+
+/* decompression */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSE_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSE_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    memcpy(&DTableH, dt, sizeof(DTableH));
+    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32  nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BIT_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32 nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BIT_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+*  Static allocation macros
+******************************************/
+/* Huff0 buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible data is pre-filtered with a fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* static allocation of Huff0's DTable */
+#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
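The DTable creation macros above bundle the size computation with initialization of the first cell to `maxTableLog`; a sketch of static use with a hypothetical table name:

```c
/* Sketch: statically allocate a single-symbol (X2) Huffman decoding table
   for tables up to 2^12 cells; the first cell records maxTableLog. */
HUF_CREATE_STATIC_DTABLEX2(exampleHufDTable, 12);
```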
+
+
+/******************************************
+*  Advanced functions
+******************************************/
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
+static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* quad-symbols decoder */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+/*
+    zstd - standard compression library
+    Header File
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stddef.h>   /* size_t */
+
+
+/* *************************************
+*  Version
+***************************************/
+#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
+#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */
+#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */
+#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
+
+
+/* *************************************
+*  Advanced functions
+***************************************/
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */
+
+#if defined (__cplusplus)
+}
+#endif
+/*
+    zstd - standard compression library
+    Header File for static linking only
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * You can use them for tests, to provide feedback, or if you can tolerate the risk of future changes.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Streaming functions
+***************************************/
+
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+
+/*
+  Use the above functions in alternation.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() will use previously decoded data blocks when they are located prior to the current block in memory.
+  Result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+*/
+
+/* *************************************
+*  Prefix - version detection
+***************************************/
+#define ZSTD_magicNumber 0xFD2FB522   /* v0.2 (current)*/
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/****************************************************************
+*  Tuning parameters
+****************************************************************/
+/* MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/* FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/****************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+
+
+/****************************************************************
+*  Byte symbol type
+****************************************************************/
+#endif   /* !FSE_COMMONDEFS_ONLY */
+
+
+/****************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/****************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+/****************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/****************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/****************************************************************
+*  Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+/****************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included,
+  for type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
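The two-level `FSE_CAT` indirection above is the standard C trick for token pasting after macro expansion; a small illustration with a hypothetical suffix macro:

```c
/* Illustration (hypothetical suffix): arguments to FSE_FUNCTION_NAME are
   macro-expanded before FSE_CAT pastes them, so each "instantiation" of the
   template gets its own symbol. */
#define EXAMPLE_SUFFIX _demo
static int FSE_FUNCTION_NAME(exampleFn, EXAMPLE_SUFFIX)(void) { return 0; }
/* declares: static int exampleFn_demo(void) */
```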
+
+
+/* Function templates */
+
+#define FSE_DECODE_TYPE FSE_decode_t
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+static size_t FSE_buildDTable
+(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* ptr = dt+1;
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
+    FSE_DTableHeader DTableH;
+    const U32 tableSize = 1 << tableLog;
+    const U32 tableMask = tableSize-1;
+    const U32 step = FSE_tableStep(tableSize);
+    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+    U32 position = 0;
+    U32 highThreshold = tableSize-1;
+    const S16 largeLimit= (S16)(1 << (tableLog-1));
+    U32 noLarge = 1;
+    U32 s;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    DTableH.tableLog = (U16)tableLog;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        if (normalizedCounter[s]==-1)
+        {
+            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+            symbolNext[s] = 1;
+        }
+        else
+        {
+            if (normalizedCounter[s] >= largeLimit) noLarge=0;
+            symbolNext[s] = normalizedCounter[s];
+        }
+    }
+
+    /* Spread symbols */
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        int i;
+        for (i=0; i<normalizedCounter[s]; i++)
+        {
+            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+            position = (position + step) & tableMask;
+            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }
+    }
+
+    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+    /* Build Decoding table */
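+    /* symbolNext[] holds, per symbol, the next state value to assign ;
+       nbBits = tableLog - highbit(nextState), so higher states consume
+       fewer fresh bits from the stream */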
+    {
+        U32 i;
+        for (i=0; i<tableSize; i++)
+        {
+            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+        }
+    }
+
+    DTableH.fastMode = (U16)noLarge;
+    memcpy(dt, &DTableH, sizeof(DTableH));   /* memcpy(), to avoid strict aliasing warnings */
+    return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+/******************************************
+*  FSE helper functions
+******************************************/
+static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+
+/****************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a)
+{
+    return (short)(a<0 ? -a : a);
+}
+
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
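+    /* `remaining` tracks the probability mass still to distribute (starting
+       at 2^tableLog, offset by 1) ; the header ends exactly when it reaches 1 */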
+
+    while ((remaining>1) && (charnum<=*maxSVPtr))
+    {
+        if (previous0)
+        {
+            unsigned n0 = charnum;
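+            /* after a zero count, a run-length of further zero counts follows :
+               each 0xFFFF chunk adds 24 zeros, each binary '11' pair adds 3,
+               then the final 2 bits add 0-3 */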
+            while ((bitStream & 0xFFFF) == 0xFFFF)
+            {
+                n0+=24;
+                if (ip < iend-5)
+                {
+                    ip+=2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                }
+                else
+                {
+                    bitStream >>= 16;
+                    bitCount+=16;
+                }
+            }
+            while ((bitStream & 3) == 3)
+            {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+            {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
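+        /* read one count : values below `max` fit in nbBits-1 bits ;
+           larger values use the full nbBits and are folded back by `max` */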
+        {
+            const short max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max)
+            {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            }
+            else
+            {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSE_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold)
+            {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            {
+                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+                {
+                    ip += bitCount>>3;
+                    bitCount &= 7;
+                }
+                else
+                {
+                    bitCount -= (int)(8 * (iend - 4 - ip));
+                    ip = iend - 4;
+                }
+                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+            }
+        }
+    }
+    if (remaining != 1) return ERROR(GENERIC);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+    return ip-istart;
+}
+
+
+/*********************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;   /* because dt is unsigned */
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSE_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BIT_DStream_t bitD;
+    FSE_DState_t state1;
+    FSE_DState_t state2;
+    size_t errorCode;
+
+    /* Init */
+    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
+    if (FSE_isError(errorCode)) return errorCode;
+
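+    /* two interleaved decode states per bitstream hide the serial dependency
+       between consecutive symbols and keep the CPU pipeline busy */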
+    FSE_initDState(&state1, &bitD, dt);
+    FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
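+/* the reload tests below compare compile-time constants, so the compiler can
+   drop the branches entirely when the bit container is large enough */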
+
+    /* 4 symbols per loop */
+    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
+    {
+        op[0] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[1] = FSE_GETSYMBOL(&state2);
+
+        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[3] = FSE_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_partiallyFilled; ends at exactly BIT_DStream_completed */
+    while (1)
+    {
+        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state1);
+
+        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state2);
+    }
+
+    /* end ? */
+    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+        return op-ostart;
+
+    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
+
+    return ERROR(corruption_detected);
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    memcpy(&DTableH, dt, sizeof(DTableH));
+
+    /* select fast mode (static) */
+    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSE_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    size_t errorCode;
+
+    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
+
+    /* normal FSE decoding mode */
+    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSE_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+    if (FSE_isError(errorCode)) return errorCode;
+
+    /* always return, even if it is an error code */
+    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/****************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/****************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+/****************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/******************************************
+*  Helper functions
+******************************************/
+static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+
+#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
+#define HUF_MAX_SYMBOL_VALUE 255
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+#  error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+
+
+/*********************************************************
+*  Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
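+/* an X4 cell packs up to two decoded bytes into `sequence` ; `length` tells
+   how many of them are valid, `nbBits` how many input bits the lookup consumed */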
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUF_readStats
+    Read compact Huffman tree, saved by HUF_writeCTable
+    @huffWeight : destination buffer
+    @return : size read from `src`
+*/
+static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                            U32* nbSymbolsPtr, U32* tableLogPtr,
+                            const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    U32 tableLog;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+    U32 n;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
+
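+    /* header byte : < 128 means FSE-compressed weights of that many bytes ;
+       128..241 means direct 4-bit weights (count = iSize-127) ;
+       >= 242 means an RLE run of weight-1 symbols with a predefined length */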
+    if (iSize >= 128)  /* special header */
+    {
+        if (iSize >= (242))   /* RLE */
+        {
+            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, hwSize);
+            iSize = 0;
+        }
+        else   /* Incompressible */
+        {
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+            if (oSize >= hwSize) return ERROR(corruption_detected);
+            ip += 1;
+            for (n=0; n<oSize; n+=2)
+            {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+            }
+        }
+    }
+    else  /* header compressed with FSE (normal case) */
+    {
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+    weightTotal = 0;
+    for (n=0; n<oSize; n++)
+    {
+        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+        rankStats[huffWeight[n]]++;
+        weightTotal += (1 << huffWeight[n]) >> 1;
+    }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    tableLog = BIT_highbit32(weightTotal) + 1;
+    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
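+    /* e.g. weightTotal == 6 => tableLog = 3, total = 8, rest = 2 :
+       rest is a clean power of 2, so the implied last weight is 2 */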
+    {
+        U32 total = 1 << tableLog;
+        U32 rest = total - weightTotal;
+        U32 verif = 1 << BIT_highbit32(rest);
+        U32 lastWeight = BIT_highbit32(rest) + 1;
+        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+        huffWeight[oSize] = (BYTE)lastWeight;
+        rankStats[lastWeight]++;
+    }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    *tableLogPtr = tableLog;
+    return iSize+1;
+}
+
+
+/**************************/
+/* single-symbol decoding */
+/**************************/
+
+static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize = ip[0];
+    U32 nbSymbols = 0;
+    U32 n;
+    U32 nextRankStart;
+    void* ptr = DTable+1;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)ptr;
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
+    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
+
+    /* Prepare ranks */
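+    /* turn per-weight symbol counts into start offsets : a weight-n symbol
+       occupies 2^(n-1) consecutive cells of the decoding table */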
+    nextRankStart = 0;
+    for (n=1; n<=tableLog; n++)
+    {
+        U32 current = nextRankStart;
+        nextRankStart += (rankVal[n] << (n-1));
+        rankVal[n] = current;
+    }
+
+    /* fill DTable */
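+    /* weight w maps to code length (tableLog + 1 - w) : frequent symbols get
+       shorter codes and larger replication ranges */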
+    for (n=0; n<nbSymbols; n++)
+    {
+        const U32 w = huffWeight[n];
+        const U32 length = (1 << w) >> 1;
+        U32 i;
+        HUF_DEltX2 D;
+        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+        for (i = rankVal[w]; i < rankVal[w] + length; i++)
+            dt[i] = D;
+        rankVal[w] += length;
+    }
+
+    return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+        const BYTE c = dt[val].byte;
+        BIT_skipBits(Dstream, dt[val].nbBits);
+        return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
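+/* the _1/_2 variants skip decodes on 32-bit targets, so that the bits consumed
+   between two reloads can never exceed the size of the bit container */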
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+    {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+
+static size_t HUF_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+
+        const void* ptr = DTable;
+        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
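+        /* 6-byte jump table : three little-endian 16-bit stream sizes ;
+           the fourth stream size is implied by the total compressed size */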
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+        {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+    size_t errorCode;
+
+    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/***************************/
+/* double-symbols decoding */
+/***************************/
+
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUF_DEltX4 DElt;
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    U32 s;
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1)
+    {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */
+    {
+        const U32 symbol = sortedSymbols[s].symbol;
+        const U32 weight = sortedSymbols[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 length = 1 << (sizeLog-nbBits);
+        const U32 start = rankVal[weight];
+        U32 i = start;
+        const U32 end = start + length;
+
+        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+        DElt.nbBits = (BYTE)(nbBits + consumed);
+        DElt.length = 2;
+        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+        rankVal[weight] += length;
+    }
+}
+
+typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++)
+    {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits)   /* enough room for a second symbol */
+        {
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        }
+        else
+        {
+            U32 i;
+            const U32 end = start + length;
+            HUF_DEltX4 DElt;
+
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits   = (BYTE)(nbBits);
+            DElt.length   = 1;
+            for (i = start; i < end; i++)
+                DTable[i] = DElt;
+        }
+        rankVal[weight] += length;
+    }
+}
+
+static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    const U32 memLog = DTable[0];
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize = ip[0];
+    void* ptr = DTable;
+    HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
+    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
+
+    /* Get start index of each weight */
+    {
+        U32 w, nextRankStart = 0;
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {
+        U32 s;
+        for (s=0; s<nbSymbols; s++)
+        {
+            U32 w = weightList[s];
+            U32 r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {
+        const U32 minBits = tableLog+1 - maxW;
+        U32 nextRankVal = 0;
+        U32 w, consumed;
+        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
+        U32* rankVal0 = rankVal[0];
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankVal;
+            nextRankVal += rankStats[w] << (w+rescale);
+            rankVal0[w] = current;
+        }
+        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+        {
+            U32* rankValPtr = rankVal[consumed];
+            for (w = 1; w <= maxW; w++)
+            {
+                rankValPtr[w] = rankVal0[w] >> consumed;
+            }
+        }
+    }
+
+    HUF_fillDTableX4(dt, memLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else
+    {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+        {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+        }
+    }
+    return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
+    {
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+
+
+static size_t HUF_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+
+        const void* ptr = DTable;
+        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+        {
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* quad-symbol decoding           */
+/**********************************/
+typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
+typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;
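+/* X6 keeps two parallel tables : per-cell descriptors (bits consumed, bytes
+   produced) and 4-byte sequences, so a single lookup can emit up to 4 symbols */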
+
+/* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */
+static void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog,
+                           const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart,
+                           const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc)
+{
+    const int scaleLog = nbBitsBaseline - sizeLog;   /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */
+    const int minBits  = nbBitsBaseline - maxWeight;
+    const U32 level = DDesc.nbBytes;
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    U32 symbolStartPos, s;
+
+    /* local rankVal, will be modified */
+    memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1)
+    {
+        U32 i;
+        const U32 skipSize = rankVal[minWeight];
+        for (i = 0; i < skipSize; i++)
+        {
+            DSequence[i] = baseSeq;
+            DDescription[i] = DDesc;
+        }
+    }
+
+    /* fill DTable */
+    DDesc.nbBytes++;
+    symbolStartPos = rankStart[minWeight];
+    for (s=symbolStartPos; s<sortedListSize; s++)
+    {
+        const BYTE symbol = sortedSymbols[s].symbol;
+        const U32  weight = sortedSymbols[s].weight;   /* >= 1 (sorted) */
+        const int  nbBits = nbBitsBaseline - weight;   /* >= 1 (by construction) */
+        const int  totalBits = consumed+nbBits;
+        const U32  start  = rankVal[weight];
+        const U32  length = 1 << (sizeLog-nbBits);
+        baseSeq.byte[level] = symbol;
+        DDesc.nbBits = (BYTE)totalBits;
+
+        if ((level<3) && (sizeLog-totalBits >= minBits))   /* enough room for another symbol */
+        {
+            int nextMinWeight = totalBits + scaleLog;
+            if (nextMinWeight < 1) nextMinWeight = 1;
+            HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits,
+                           rankValOrigin, totalBits, nextMinWeight, maxWeight,
+                           sortedSymbols, sortedListSize, rankStart,
+                           nbBitsBaseline, baseSeq, DDesc);   /* recursive (max : level 3) */
+        }
+        else
+        {
+            U32 i;
+            const U32 end = start + length;
+            for (i = start; i < end; i++)
+            {
+                DDescription[i] = DDesc;
+                DSequence[i] = baseSeq;
+            }
+        }
+        rankVal[weight] += length;
+    }
+}
+
+
+/* note : same preparation as X4 */
+static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    rankVal_t rankVal;
+    const U32 memLog = DTable[0];
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize = ip[0];
+
+    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable is too small */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
+
+
+    /* Get start index of each weight */
+    {
+        U32 w, nextRankStart = 0;
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {
+        U32 s;
+        for (s=0; s<nbSymbols; s++)
+        {
+            U32 w = weightList[s];
+            U32 r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {
+        const U32 minBits = tableLog+1 - maxW;
+        U32 nextRankVal = 0;
+        U32 w, consumed;
+        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
+        U32* rankVal0 = rankVal[0];
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankVal;
+            nextRankVal += rankStats[w] << (w+rescale);
+            rankVal0[w] = current;
+        }
+        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+        {
+            U32* rankValPtr = rankVal[consumed];
+            for (w = 1; w <= maxW; w++)
+            {
+                rankValPtr[w] = rankVal0[w] >> consumed;
+            }
+        }
+    }
+
+
+    /* fill tables */
+    {
+        void* ptr = DTable+1;
+        HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr);
+        void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1));
+        HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart);
+        HUF_DSeqX6 DSeq;
+        HUF_DDescX6 DDesc;
+        DSeq.sequence = 0;
+        DDesc.nbBits = 0;
+        DDesc.nbBytes = 0;
+        HUF_fillDTableX6LevelN(DDescription, DSequence, memLog,
+                       (const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW,
+                       sortedSymbol, sizeOfSort, rankStart0,
+                       tableLog+1, DSeq, DDesc);
+    }
+
+    return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, ds+val, sizeof(HUF_DSeqX6));
+    BIT_skipBits(DStream, dd[val].nbBits);
+    return dd[val].nbBytes;
+}
+
+static U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream,
+                                  const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    U32 length = dd[val].nbBytes;
+    if (length <= maxL)
+    {
+        memcpy(op, ds+val, length);
+        BIT_skipBits(DStream, dd[val].nbBits);
+        return length;
+    }
+    memcpy(op, ds+val, maxL);
+    if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+    {
+        BIT_skipBits(DStream, dd[val].nbBits);
+        if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+            DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+    }
+    return maxL;
+}
+
+
+#define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog)
+
+#define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog)
+{
+    const void* ddPtr = DTable+1;
+    const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
+    const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
+    const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
+    BYTE* const pStart = p;
+
+    /* up to 16 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16))
+    {
+        HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX6_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
+    }
+
+    /* closer to the end, up to 4 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
+
+    while (p <= pEnd-4)
+        HUF_DECODE_SYMBOLX6_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    while (p < pEnd)
+        p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog);
+
+    return p-pStart;
+}
+
+
+
+static size_t HUF_decompress4X6_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+
+        const U32 dtLog = DTable[0];
+        const void* ddPtr = DTable+1;
+        const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
+        const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
+        const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-64 symbols per loop (4-16 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; )
+        {
+            HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX6_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX6_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX6_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX6_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX6_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX6_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX6_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX6_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog);
+        HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog);
+        HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog);
+        HUF_decodeStreamX6(op4, &bitD4, oend,     DTable, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* Generic decompression selector */
+/**********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
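+/* each pair estimates { table construction time, time per 256 output bytes }
+   for a compression ratio bucket Q = cSrcSize*16/dstSize (see below) */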
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 };
+    /* estimate decompression time */
+    U32 Q;
+    const U32 D256 = (U32)(dstSize >> 8);
+    U32 Dtime[3];
+    U32 algoNb = 0;
+    int n;
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    /* decoder timing evaluation */
+    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+    for (n=0; n<3; n++)
+        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+    if (Dtime[1] < Dtime[0]) algoNb = 1;
+    if (Dtime[2] < Dtime[algoNb]) algoNb = 2;
+
+    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
+    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
+    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
+}
+/*
+    zstd - standard compression library
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+*  MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves the compression ratio.
+*  Reducing memory usage can improve speed, due to cache effects.
+*/
+#define ZSTD_MEMORY_USAGE 17
+
+/*!
+ * HEAPMODE :
+ * Select how the default compression functions allocate memory for their hash table :
+ * on the stack (0, fastest), or on the heap (1, requires malloc()).
+ * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
+ */
+#ifndef ZSTD_HEAPMODE
+#  define ZSTD_HEAPMODE 1
+#endif /* ZSTD_HEAPMODE */
+
+/*!
+*  LEGACY_SUPPORT :
+*  The decompressor can decode older formats (from zstd 0.1 onward).
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+#  define ZSTD_LEGACY_SUPPORT 1
+#endif
+
+
+/* *******************************************************
+*  Includes
+*********************************************************/
+#include <stdlib.h>      /* calloc */
+#include <string.h>      /* memcpy, memmove */
+#include <stdio.h>       /* debug : printf */
+
+
+/* *******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+#  include <immintrin.h>   /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#endif
+
+
+/* *******************************************************
+*  Constants
+*********************************************************/
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
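+/* e.g. with ZSTD_MEMORY_USAGE == 17 : HASH_LOG == 15, HASH_TABLESIZE == 32768 entries,
+   i.e. 128 KB when stored as U32, matching the 2^N-bytes formula above */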
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
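+/* note : 'KB', 'MB' and 'GB' are postfix multipliers, so '(128 KB)' expands to '(128 *(1 <<10))' == 131072 */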
+#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
+#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
+#define MINMATCH 4
+#define MLbits   7
+#define LLbits   6
+#define Offbits  5
+#define MaxML  ((1<<MLbits )-1)
+#define MaxLL  ((1<<LLbits )-1)
+#define MaxOff   31
+#define LitFSELog  11
+#define MLFSELog   10
+#define LLFSELog   10
+#define OffFSELog   9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7   /* to remove */
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/* *******************************************************
+*  Memory operations
+**********************************************************/
+static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy : custom version of memcpy(), which can copy up to 7-8 bytes more than requested, so destination buffers need a small margin */
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do COPY8(op, ip) while (op < oend);
+}
+
+
+/* **************************************
+*  Local structures
+****************************************/
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+typedef struct {
+    void* buffer;
+    U32*  offsetStart;
+    U32*  offset;
+    BYTE* offCodeStart;
+    BYTE* offCode;
+    BYTE* litStart;
+    BYTE* lit;
+    BYTE* litLengthStart;
+    BYTE* litLength;
+    BYTE* matchLengthStart;
+    BYTE* matchLength;
+    BYTE* dumpsStart;
+    BYTE* dumps;
+} seqStore_t;
+
+
+/* *************************************
+*  Error Management
+***************************************/
+/*! ZSTD_isError
+*   tells if a return value is an error code */
+static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+
+
+/* *************************************************************
+*   Decompression section
+***************************************************************/
+struct ZSTD_DCtx_s
+{
+    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+    void* previousDstEnd;
+    void* base;
+    size_t expected;
+    blockType_t bType;
+    U32 phase;
+    const BYTE* litPtr;
+    size_t litSize;
+    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+};   /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
+
+
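+/* Block header layout (3 bytes), as decoded below : the top 2 bits of byte 0
+   carry the blockType, its low 3 bits carry the high bits of the block size,
+   and bytes 1-2 carry the remaining 16 size bits. */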
+static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    BYTE headerFlags;
+    U32 cSize;
+
+    if (srcSize < 3) return ERROR(srcSize_wrong);
+
+    headerFlags = *in;
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+    @return : nb of bytes read from src, or an error code*/
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+                                const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+
+    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+    *maxDstSizePtr = litSize;
+    return litCSize + 5;
+}
+
+
+/** ZSTD_decodeLiteralsBlock
+    @return : nb of bytes read from src (< srcSize )*/
+static size_t ZSTD_decodeLiteralsBlock(void* ctx,
+                          const void* src, size_t srcSize)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+    const BYTE* const istart = (const BYTE* const)src;
+
+    /* any compressed block with literals segment must be at least this size */
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    switch(*istart & 3)
+    {
+    default:
+    case 0:
+        {
+            size_t litSize = BLOCKSIZE;
+            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, 8);
+            return readSize;   /* works if it's an error too */
+        }
+    case IS_RAW:
+        {
+            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
+            {
+                if (litSize > srcSize-3) return ERROR(corruption_detected);
+                memcpy(dctx->litBuffer, istart, litSize);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                memset(dctx->litBuffer + dctx->litSize, 0, 8);
+                return litSize+3;
+            }
+            /* direct reference into compressed stream */
+            dctx->litPtr = istart+3;
+            dctx->litSize = litSize;
+            return litSize+3;
+        }
+    case IS_RLE:
+        {
+            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+            memset(dctx->litBuffer, istart[3], litSize + 8);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            return 4;
+        }
+    }
+}
+
+
+static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+                         const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* ip = istart;
+    const BYTE* const iend = istart + srcSize;
+    U32 LLtype, Offtype, MLtype;
+    U32 LLlog, Offlog, MLlog;
+    size_t dumpsLength;
+
+    /* check */
+    if (srcSize < 5) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    *nbSeq = MEM_readLE16(ip); ip+=2;
+    LLtype  = *ip >> 6;
+    Offtype = (*ip >> 4) & 3;
+    MLtype  = (*ip >> 2) & 3;
+    if (*ip & 2)
+    {
+        dumpsLength  = ip[2];
+        dumpsLength += ip[1] << 8;
+        ip += 3;
+    }
+    else
+    {
+        dumpsLength  = ip[1];
+        dumpsLength += (ip[0] & 1) << 8;
+        ip += 2;
+    }
+    *dumpsPtr = ip;
+    ip += dumpsLength;
+    *dumpsLengthPtr = dumpsLength;
+
+    /* check */
+    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+    /* sequences */
+    {
+        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
+        size_t headerSize;
+
+        /* Build DTables */
+        switch(LLtype)
+        {
+        case bt_rle :
+            LLlog = 0;
+            FSE_buildDTable_rle(DTableLL, *ip++); break;
+        case bt_raw :
+            LLlog = LLbits;
+            FSE_buildDTable_raw(DTableLL, LLbits); break;
+        default :
+            {   U32 max = MaxLL;
+                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (LLlog > LLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableLL, norm, max, LLlog);
+        }   }
+
+        switch(Offtype)
+        {
+        case bt_rle :
+            Offlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+            break;
+        case bt_raw :
+            Offlog = Offbits;
+            FSE_buildDTable_raw(DTableOffb, Offbits); break;
+        default :
+            {   U32 max = MaxOff;
+                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (Offlog > OffFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableOffb, norm, max, Offlog);
+        }   }
+
+        switch(MLtype)
+        {
+        case bt_rle :
+            MLlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableML, *ip++); break;
+        case bt_raw :
+            MLlog = MLbits;
+            FSE_buildDTable_raw(DTableML, MLbits); break;
+        default :
+            {   U32 max = MaxML;
+                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (MLlog > MLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableML, norm, max, MLlog);
+    }   }   }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t offset;
+    size_t matchLength;
+} seq_t;
+
+typedef struct {
+    BIT_DStream_t DStream;
+    FSE_DState_t stateLL;
+    FSE_DState_t stateOffb;
+    FSE_DState_t stateML;
+    size_t prevOffset;
+    const BYTE* dumps;
+    const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+    size_t litLength;
+    size_t prevOffset;
+    size_t offset;
+    size_t matchLength;
+    const BYTE* dumps = seqState->dumps;
+    const BYTE* const de = seqState->dumpsEnd;
+
+    /* Literal length */
+    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+    prevOffset = litLength ? seq->offset : seqState->prevOffset;
+    seqState->prevOffset = seq->offset;
+    if (litLength == MaxLL)
+    {
+        U32 add = *dumps++;
+        if (add < 255) litLength += add;
+        else
+        {
+            litLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no overread risk : dumps is always followed by the seq tables (> 1 byte) */
+            dumps += 3;
+        }
+        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+
+    /* Offset */
+    {
+        static const size_t offsetPrefix[MaxOff+1] = {  /* note : size_t faster than U32 */
+                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+        U32 offsetCode, nbBits;
+        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
+        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+        nbBits = offsetCode - 1;
+        if (offsetCode==0) nbBits = 0;   /* cmove */
+        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+        if (offsetCode==0) offset = prevOffset;   /* cmove */
+    }
+
+    /* MatchLength */
+    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+    if (matchLength == MaxML)
+    {
+        U32 add = *dumps++;
+        if (add < 255) matchLength += add;
+        else
+        {
+            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no overread risk : dumps is always followed by the seq tables (> 1 byte) */
+            dumps += 3;
+        }
+        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+    matchLength += MINMATCH;
+
+    /* save result */
+    seq->litLength = litLength;
+    seq->offset = offset;
+    seq->matchLength = matchLength;
+    seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+                                seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                BYTE* const base, BYTE* const oend)
+{
+    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
+    const BYTE* const ostart = op;
+    BYTE* const oLitEnd = op + sequence.litLength;
+    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_8 = oend-8;
+    const BYTE* const litEnd = *litPtr + sequence.litLength;
+
+    /* checks */
+    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
+    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
+    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */
+
+    /* copy Literals */
+    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = litEnd;   /* update for next sequence */
+
+    /* copy Match */
+    {
+        const BYTE* match = op - sequence.offset;
+
+        /* check */
+        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems to be kept by the clang optimizer) */
+        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
+        if (match < base) return ERROR(corruption_detected);
+
+        /* close range match, overlap */
+        if (sequence.offset < 8)
+        {
+            const int dec64 = dec64table[sequence.offset];
+            op[0] = match[0];
+            op[1] = match[1];
+            op[2] = match[2];
+            op[3] = match[3];
+            match += dec32table[sequence.offset];
+            ZSTD_copy4(op+4, match);
+            match -= dec64;
+        }
+        else
+        {
+            ZSTD_copy8(op, match);
+        }
+        op += 8; match += 8;
+
+        if (oMatchEnd > oend-(16-MINMATCH))
+        {
+            if (op < oend_8)
+            {
+                ZSTD_wildcopy(op, match, oend_8 - op);
+                match += oend_8 - op;
+                op = oend_8;
+            }
+            while (op < oMatchEnd) *op++ = *match++;
+        }
+        else
+        {
+            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+        }
+    }
+
+    return oMatchEnd - ostart;
+}
+
+static size_t ZSTD_decompressSequences(
+                               void* ctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t errorCode, dumpsLength;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    int nbSeq;
+    const BYTE* dumps;
+    U32* DTableLL = dctx->LLTable;
+    U32* DTableML = dctx->MLTable;
+    U32* DTableOffb = dctx->OffTable;
+    BYTE* const base = (BYTE*) (dctx->base);
+
+    /* Build Decoding Tables */
+    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+                                      DTableLL, DTableML, DTableOffb,
+                                      ip, iend-ip);
+    if (ZSTD_isError(errorCode)) return errorCode;
+    ip += errorCode;
+
+    /* Regen sequences */
+    {
+        seq_t sequence;
+        seqState_t seqState;
+
+        memset(&sequence, 0, sizeof(sequence));
+        seqState.dumps = dumps;
+        seqState.dumpsEnd = dumps + dumpsLength;
+        seqState.prevOffset = 1;
+        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
+        {
+            size_t oneSeqSize;
+            nbSeq--;
+            ZSTD_decodeSequence(&sequence, &seqState);
+            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* check if reached exact end */
+        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
+        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */
+
+        /* last literal segment */
+        {
+            size_t lastLLSize = litEnd - litPtr;
+            if (litPtr > litEnd) return ERROR(corruption_detected);
+            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+            if (op != litPtr) memmove(op, litPtr, lastLLSize);
+            op += lastLLSize;
+        }
+    }
+
+    return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+                            void* ctx,
+                            void* dst, size_t maxDstSize,
+                      const void* src, size_t srcSize)
+{
+    /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+
+    /* Decode literals sub-block */
+    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
+    if (ZSTD_isError(litCSize)) return litCSize;
+    ip += litCSize;
+    srcSize -= litCSize;
+
+    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t remainingSize = srcSize;
+    U32 magicNumber;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t decodedSize=0;
+        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet supported */
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            break;
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        if (ZSTD_isError(decodedSize)) return decodedSize;
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return op-ostart;
+}
+
+static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    ZSTD_DCtx ctx;
+    ctx.base = dst;
+    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+static size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+    U32 magicNumber;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+/*******************************
+*  Streaming Decompression API
+*******************************/
+
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+    dctx->expected = ZSTD_frameHeaderSize;
+    dctx->phase = 0;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    return 0;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+    if (dctx==NULL) return NULL;
+    ZSTD_resetDCtx(dctx);
+    return dctx;
+}
+
+static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+    free(dctx);
+    return 0;
+}
+
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+    return dctx->expected;
+}
+
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+    if (dst != ctx->previousDstEnd)  /* not contiguous */
+        ctx->base = dst;
+
+    /* Decompress : frame header */
+    if (ctx->phase == 0)
+    {
+        /* Check frame magic header */
+        U32 magicNumber = MEM_readLE32(src);
+        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+        ctx->phase = 1;
+        ctx->expected = ZSTD_blockHeaderSize;
+        return 0;
+    }
+
+    /* Decompress : block header */
+    if (ctx->phase == 1)
+    {
+        blockProperties_t bp;
+        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+        if (ZSTD_isError(blockSize)) return blockSize;
+        if (bp.blockType == bt_end)
+        {
+            ctx->expected = 0;
+            ctx->phase = 0;
+        }
+        else
+        {
+            ctx->expected = blockSize;
+            ctx->bType = bp.blockType;
+            ctx->phase = 2;
+        }
+
+        return 0;
+    }
+
+    /* Decompress : block content */
+    {
+        size_t rSize;
+        switch(ctx->bType)
+        {
+        case bt_compressed:
+            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+            break;
+        case bt_raw :
+            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet handled */
+            break;
+        case bt_end :   /* should never happen (filtered at phase 1) */
+            rSize = 0;
+            break;
+        default:
+            return ERROR(GENERIC);
+        }
+        ctx->phase = 1;
+        ctx->expected = ZSTD_blockHeaderSize;
+        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+        return rSize;
+    }
+
+}
+
+
+/* wrapper layer */
+
+unsigned ZSTDv02_isError(size_t code)
+{
+    return ZSTD_isError(code);
+}
+
+size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
+                     const void* src, size_t compressedSize)
+{
+    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
+}
+
+size_t ZSTDv02_findFrameCompressedSize(const void *src, size_t compressedSize)
+{
+    return ZSTD_findFrameCompressedSize(src, compressedSize);
+}
+
+ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
+{
+    return (ZSTDv02_Dctx*)ZSTD_createDCtx();
+}
+
+size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
+{
+    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
+{
+    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
+{
+    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v02.h b/vendor/github.com/DataDog/zstd/zstd_v02.h
new file mode 100644
index 0000000..0dde7a6
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v02.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V02_H_4174539423
+#define ZSTD_V02_H_4174539423
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stddef.h>   /* size_t */
+
+
+/* *************************************
+*  Simple one-step function
+***************************************/
+/**
+ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
+    compressedSize : is the exact source size
+    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
+                      It must be equal to or larger than originalSize, otherwise decompression will fail.
+    return : the number of bytes decompressed into destination buffer (originalSize)
+             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
+*/
+size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
+                     const void* src, size_t compressedSize);
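+/*
+  Minimal usage sketch (editor's illustration; ORIGINAL_SIZE and handleError()
+  are placeholders, not part of this API) :
+
+    char dst[ORIGINAL_SIZE];
+    size_t r = ZSTDv02_decompress(dst, sizeof(dst), src, srcSize);
+    if (ZSTDv02_isError(r)) handleError();
+*/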
+
+/**
+ZSTDv02_findFrameCompressedSize() : get the compressed size of a ZSTD frame compliant with v0.2.x format
+    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame
+             or an errorCode if it fails (which can be tested using ZSTDv02_isError())
+*/
+size_t ZSTDv02_findFrameCompressedSize(const void* src, size_t compressedSize);
+
+/**
+ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
+*/
+unsigned ZSTDv02_isError(size_t code);
+
+
+/* *************************************
+*  Advanced functions
+***************************************/
+typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
+ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
+size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);
+
+size_t ZSTDv02_decompressDCtx(void* ctx,
+                              void* dst, size_t maxOriginalSize,
+                        const void* src, size_t compressedSize);
+
+/* *************************************
+*  Streaming functions
+***************************************/
+size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);
+
+size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
+size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() will use previously decoded data blocks to improve decompression when they are located prior to the current block.
+  The result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+  A usage sketch follows below.
+*/
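+/*
+  Usage sketch (editor's illustration; readFromSource() is a hypothetical
+  input callback, and error/capacity handling is condensed) :
+
+    ZSTDv02_Dctx* dctx = ZSTDv02_createDCtx();
+    size_t toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);   // frame header first
+    while (toRead != 0)
+    {
+        readFromSource(srcBuffer, toRead);                   // provide exactly toRead bytes
+        size_t produced = ZSTDv02_decompressContinue(dctx, dst, dstCapacity, srcBuffer, toRead);
+        if (ZSTDv02_isError(produced)) break;                // corrupted or truncated input
+        dst = (char*)dst + produced;                         // produced may be 0 after a header
+        dstCapacity -= produced;
+        toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);      // becomes 0 once the end-of-frame block is seen
+    }
+    ZSTDv02_freeDCtx(dctx);
+*/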
+
+/* *************************************
+*  Prefix - version detection
+***************************************/
+#define ZSTDv02_magicNumber 0xFD2FB522   /* v0.2 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V02_H_4174539423 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.c b/vendor/github.com/DataDog/zstd/zstd_v03.c
new file mode 100644
index 0000000..54445af
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v03.c
@@ -0,0 +1,3124 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include "zstd_v03.h"
+#include "error_private.h"
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+
+
+
+/* ******************************************************************
+   mem.h
+   low-level memory access routines
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+*  Includes
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include <string.h>    /* memcpy */
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+#  define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/****************************************************************
+*  Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/****************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on compiler extensions but violates the C standard.
+ *            It can generate buggy code on targets whose generated assembly depends on alignment.
+ *            But in some circumstances, it's the only known way to get the best performance (e.g., GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates C standard on structure alignment.
+Only use if no other choice to achieve best performance on target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __packed attributes are safer, but compiler-specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+
+#endif // MEM_FORCE_MEMORY_ACCESS
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian())
+    {
+        MEM_write16(memPtr, val);
+    }
+    else
+    {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+    }
+}
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+    }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+
+/* ******************************************************************
+   bitstream
+   Part of NewGen Entropy library
+   header file (to include)
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which benefit greatly from being inlined.
+*  Since link-time optimization is not available for all compilers,
+*  these functions are defined in a .h file, to be included directly.
+*/
+
+
+/**********************************************
+*  bitStream decompression API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+               BIT_DStream_endOfBuffer = 1,
+               BIT_DStream_completed = 2,
+               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+
+/******************************************
+*  unsafe API
+******************************************/
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/****************************************************************
+*  Helper functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    unsigned r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
+
+
+
+/**********************************************************
+* bitStream decoding
+**********************************************************/
+
+/*!BIT_initDStream
+*  Initialize a BIT_DStream_t.
+*  @bitD : a pointer to an already allocated BIT_DStream_t structure
+*  @srcBuffer must point at the beginning of a bitStream
+*  @srcSize must be the exact size of the bitStream
+*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(size_t))   /* normal case */
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+    }
+    else
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8;
+            default:;
+        }
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+    }
+
+    return srcSize;
+}
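+/*! BIT_lookBits :
+ *  Provides the next nbBits from the local register without consuming them.
+ *  The '>> 1' split keeps each shift count strictly below the register width,
+ *  so the expression stays well-defined even when nbBits == 0. */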
+MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast :
+*   unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BIT_lookBits(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*!BIT_readBitsFast :
+*  unsafe version; works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BIT_lookBitsFast(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
+        return BIT_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+    {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BIT_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start)
+    {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+        return BIT_DStream_completed;
+    }
+    {
+        U32 nbBytes = bitD->bitsConsumed >> 3;
+        BIT_DStream_status result = BIT_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start)
+        {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BIT_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BIT_endOfDStream
+*   @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+   Error codes and messages
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+#  define ERR_STATIC static __inline
+#elif defined(__GNUC__)
+#  define ERR_STATIC static __attribute__((unused))
+#else
+#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/******************************************
+*  Error Management
+******************************************/
+#define PREFIX(name) ZSTD_error_##name
+
+#define ERROR(name) (size_t)-PREFIX(name)
+
+#define ERROR_LIST(ITEM) \
+        ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
+        ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
+        ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
+        ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
+        ITEM(PREFIX(maxCode))
+
+#define ERROR_GENERATE_ENUM(ENUM) ENUM,
+typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes;  /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
+
+#define ERROR_CONVERTTOSTRING(STRING) #STRING,
+#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
+static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+    static const char* codeError = "Unspecified error code";
+    if (ERR_isError(code)) return ERR_strings[-(int)(code)];
+    return codeError;
+}
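+/* Illustration (editor's note) : ERROR_LIST(ERROR_GENERATE_ENUM) expands to
+   'ZSTD_error_No_Error, ZSTD_error_GENERIC, ZSTD_error_dstSize_tooSmall, ...',
+   so ERROR(srcSize_wrong) evaluates to (size_t)-ZSTD_error_srcSize_wrong, and
+   ERR_isError() just tests whether a result falls in that small range of
+   values above (size_t)-ZSTD_error_maxCode. */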
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
+/*
+Constructor and Destructor of type FSE_CTable
+    Note that its size depends on 'tableLog' and 'maxSymbolValue' */
+typedef unsigned FSE_CTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+
+
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/******************************************
+*  Static allocation
+******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* You can statically allocate an FSE CTable/DTable as a table of unsigned using the macros below */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
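+
+/* Editorial example (not part of the original source): with the macros above,
+ * a decoding table for tableLog up to 12 can live on the stack or in static
+ * storage, with no dynamic allocation:
+ *
+ *     FSE_DTable myDTable[FSE_DTABLE_SIZE_U32(12)];   // 1 + (1<<12) unsigned cells
+ */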
+
+
+/******************************************
+*  FSE advanced API
+******************************************/
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/* build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+
+/******************************************
+*  FSE symbol decompression API
+******************************************/
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+
+/******************************************
+*  FSE unsafe API
+******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/******************************************
+*  Implementation of inline functions
+******************************************/
+
+/* decompression */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSE_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSE_decode_t;   /* size == U32 */
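+
+/* Editorial note: both structs above are exactly 32 bits, matching one
+ * FSE_DTable cell; the header occupies cell 0 and decoding entries start at
+ * dt+1, which is why FSE_initDState() below sets DStatePtr->table = dt + 1. */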
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    memcpy(&DTableH, dt, sizeof(DTableH));
+    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32  nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BIT_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32 nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BIT_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+*  Static allocation macros
+******************************************/
+/* Huff0 buffer bounds */
+#define HUF_CTABLEBOUND 129
+#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible data is pre-filtered with a fast heuristic */
+#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* static allocation of Huff0's DTable */
+#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
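+
+/* Editorial example (not part of the original source): declaring a
+ * single-symbol DTable sized for tableLog 12. The macro initializes cell 0
+ * with maxTableLog, which HUF_readDTableX2() later checks against the
+ * compressed header's tableLog:
+ *
+ *     HUF_CREATE_STATIC_DTABLEX2(myDTable, 12);   // unsigned short myDTable[1 + (1<<12)]
+ */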
+
+
+/******************************************
+*  Advanced functions
+******************************************/
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+/*
+    zstd - standard compression library
+    Header File
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stddef.h>   /* size_t */
+
+
+/* *************************************
+*  Version
+***************************************/
+#define ZSTD_VERSION_MAJOR    0    /* for breaking interface changes  */
+#define ZSTD_VERSION_MINOR    2    /* for new (non-breaking) interface capabilities */
+#define ZSTD_VERSION_RELEASE  2    /* for tweaks, bug-fixes, or development */
+#define ZSTD_VERSION_NUMBER  (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
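+/* Worked example: for this v0.2.2, ZSTD_VERSION_NUMBER = 0*10000 + 2*100 + 2 = 202. */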
+
+
+/* *************************************
+*  Advanced functions
+***************************************/
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;   /* incomplete type */
+
+#if defined (__cplusplus)
+}
+#endif
+/*
+    zstd - standard compression library
+    Header File for static linking only
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * Use them for tests and feedback, or if you can tolerate the risk of future changes.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Streaming functions
+***************************************/
+
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
+
+/*
+  Call ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() in alternation.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() will use previously decoded blocks as history to improve decompression when they are located prior to the current block.
+  The result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+*/
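+
+/* Editorial sketch of the protocol described above (illustrative only; the
+ * prototypes of these two functions, and ZSTD_isError(), are declared
+ * elsewhere in this library, not in this excerpt):
+ *
+ *     size_t toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *     while (toRead) {
+ *         read_input(buf, toRead);                          // hypothetical I/O helper
+ *         size_t decoded = ZSTD_decompressContinue(dctx, dst, dstCapacity, buf, toRead);
+ *         if (ZSTD_isError(decoded)) return decoded;
+ *         dst = (char*)dst + decoded;                       // decoded may be 0 (header-only step)
+ *         toRead = ZSTD_nextSrcSizeToDecompress(dctx);
+ *     }
+ */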
+
+/* *************************************
+*  Prefix - version detection
+***************************************/
+#define ZSTD_magicNumber 0xFD2FB523   /* v0.3 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/****************************************************************
+*  Tuning parameters
+****************************************************************/
+/* MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/* FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/****************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+
+
+/****************************************************************
+*  Byte symbol type
+****************************************************************/
+#endif   /* !FSE_COMMONDEFS_ONLY */
+
+
+/****************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/****************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+/****************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/****************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
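+
+/* Editorial example: the assert compiles to an enum whose initializer divides
+ * by zero when the condition is false, turning a broken invariant into a
+ * compilation error, e.g.
+ *
+ *     FSE_STATIC_ASSERT(FSE_MAX_TABLELOG <= FSE_TABLELOG_ABSOLUTE_MAX);
+ *
+ * (the same condition the #error above enforces). Per the note, place it only
+ * after variable declarations (C89 rule). */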
+
+
+/****************************************************************
+*  Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+/****************************************************************
+*  Templates
+****************************************************************/
+/*
+  This section is designed to be #included,
+  to generate type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+
+/* Function templates */
+
+#define FSE_DECODE_TYPE FSE_decode_t
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
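+/* Editorial note: for a power-of-two tableSize >= 16 (here tableLog >=
+ * FSE_MIN_TABLELOG == 5, so tableSize >= 32), FSE_tableStep() returns an odd
+ * value ((5/8)*tableSize + 3), hence co-prime with tableSize; stepping by it
+ * modulo tableSize visits every cell exactly once, which the "Spread symbols"
+ * loop in FSE_buildDTable() below relies on. */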
+
+static size_t FSE_buildDTable
+(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* ptr = dt+1;
+    FSE_DTableHeader DTableH;
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
+    const U32 tableSize = 1 << tableLog;
+    const U32 tableMask = tableSize-1;
+    const U32 step = FSE_tableStep(tableSize);
+    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+    U32 position = 0;
+    U32 highThreshold = tableSize-1;
+    const S16 largeLimit= (S16)(1 << (tableLog-1));
+    U32 noLarge = 1;
+    U32 s;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    DTableH.tableLog = (U16)tableLog;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        if (normalizedCounter[s]==-1)
+        {
+            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+            symbolNext[s] = 1;
+        }
+        else
+        {
+            if (normalizedCounter[s] >= largeLimit) noLarge=0;
+            symbolNext[s] = normalizedCounter[s];
+        }
+    }
+
+    /* Spread symbols */
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        int i;
+        for (i=0; i<normalizedCounter[s]; i++)
+        {
+            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+            position = (position + step) & tableMask;
+            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }
+    }
+
+    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+    /* Build Decoding table */
+    {
+        U32 i;
+        for (i=0; i<tableSize; i++)
+        {
+            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+        }
+    }
+
+    DTableH.fastMode = (U16)noLarge;
+    memcpy(dt, &DTableH, sizeof(DTableH));
+    return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+/******************************************
+*  FSE helper functions
+******************************************/
+static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+
+/****************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a)
+{
+    return a<0 ? -a : a;
+}
+
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) && (charnum<=*maxSVPtr))
+    {
+        if (previous0)
+        {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF)
+            {
+                n0+=24;
+                if (ip < iend-5)
+                {
+                    ip+=2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                }
+                else
+                {
+                    bitStream >>= 16;
+                    bitCount+=16;
+                }
+            }
+            while ((bitStream & 3) == 3)
+            {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+            {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
+        {
+            const short max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max)
+            {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            }
+            else
+            {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSE_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold)
+            {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            {
+                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+                {
+                    ip += bitCount>>3;
+                    bitCount &= 7;
+                }
+                else
+                {
+                    bitCount -= (int)(8 * (iend - 4 - ip));
+                    ip = iend - 4;
+                }
+                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+            }
+        }
+    }
+    if (remaining != 1) return ERROR(GENERIC);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+    return ip-istart;
+}
+
+
+/*********************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSE_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BIT_DStream_t bitD;
+    FSE_DState_t state1;
+    FSE_DState_t state2;
+    size_t errorCode;
+
+    /* Init */
+    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
+    if (FSE_isError(errorCode)) return errorCode;
+
+    FSE_initDState(&state1, &bitD, dt);
+    FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+    /* 4 symbols per loop */
+    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
+    {
+        op[0] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[1] = FSE_GETSYMBOL(&state2);
+
+        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[3] = FSE_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+    while (1)
+    {
+        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state1);
+
+        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state2);
+    }
+
+    /* end ? */
+    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+        return op-ostart;
+
+    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
+
+    return ERROR(corruption_detected);
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    memcpy(&DTableH, dt, sizeof(DTableH));
+
+    /* select fast mode (static) */
+    if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSE_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    size_t errorCode;
+
+    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
+
+    /* normal FSE decoding mode */
+    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSE_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+    if (FSE_isError(errorCode)) return errorCode;
+
+    /* always return, even if it is an error code */
+    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/****************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+
+/****************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+/****************************************************************
+*  Error Management
+****************************************************************/
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/******************************************
+*  Helper functions
+******************************************/
+static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+
+#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
+#define HUF_MAX_SYMBOL_VALUE 255
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+#  error "HUF_MAX_TABLELOG is too large!"
+#endif
+
+
+
+/*********************************************************
+*  Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
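+/* Editorial note: in HUF_DEltX4, `sequence` packs up to two decoded bytes
+ * (written little-endian), `length` (1 or 2) says how many of them are valid,
+ * and `nbBits` is the number of input bits the entry consumes; see
+ * HUF_decodeSymbolX4() below, which memcpy's 2 bytes and advances by length. */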
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUF_readStats
+    Read compact Huffman tree, saved by HUF_writeCTable
+    @huffWeight : destination buffer
+    @return : size read from `src`
+*/
+static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                            U32* nbSymbolsPtr, U32* tableLogPtr,
+                            const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    U32 tableLog;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+    U32 n;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* not necessary, even though some analyzers complain */
+
+    if (iSize >= 128)  /* special header */
+    {
+        if (iSize >= (242))   /* RLE */
+        {
+            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, hwSize);
+            iSize = 0;
+        }
+        else   /* Incompressible */
+        {
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+            if (oSize >= hwSize) return ERROR(corruption_detected);
+            ip += 1;
+            for (n=0; n<oSize; n+=2)
+            {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+            }
+        }
+    }
+    else  /* header compressed with FSE (normal case) */
+    {
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+    weightTotal = 0;
+    for (n=0; n<oSize; n++)
+    {
+        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+        rankStats[huffWeight[n]]++;
+        weightTotal += (1 << huffWeight[n]) >> 1;
+    }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    tableLog = BIT_highbit32(weightTotal) + 1;
+    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+    {
+        U32 total = 1 << tableLog;
+        U32 rest = total - weightTotal;
+        U32 verif = 1 << BIT_highbit32(rest);
+        U32 lastWeight = BIT_highbit32(rest) + 1;
+        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+        huffWeight[oSize] = (BYTE)lastWeight;
+        rankStats[lastWeight]++;
+    }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    *tableLogPtr = tableLog;
+    return iSize+1;
+}
+
+
+/**************************/
+/* single-symbol decoding */
+/**************************/
+
+static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize = ip[0];
+    U32 nbSymbols = 0;
+    U32 n;
+    U32 nextRankStart;
+    void* ptr = DTable+1;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)(ptr);
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* not necessary, even though some analyzers complain */
+
+    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
+    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
+
+    /* Prepare ranks */
+    nextRankStart = 0;
+    for (n=1; n<=tableLog; n++)
+    {
+        U32 current = nextRankStart;
+        nextRankStart += (rankVal[n] << (n-1));
+        rankVal[n] = current;
+    }
+
+    /* fill DTable */
+    for (n=0; n<nbSymbols; n++)
+    {
+        const U32 w = huffWeight[n];
+        const U32 length = (1 << w) >> 1;
+        U32 i;
+        HUF_DEltX2 D;
+        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+        for (i = rankVal[w]; i < rankVal[w] + length; i++)
+            dt[i] = D;
+        rankVal[w] += length;
+    }
+
+    return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+        const BYTE c = dt[val].byte;
+        BIT_skipBits(Dstream, dt[val].nbBits);
+        return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+    {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+
+static size_t HUF_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+
+        const void* ptr = DTable;
+        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+        {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+    size_t errorCode;
+
+    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/***************************/
+/* double-symbols decoding */
+/***************************/
+
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUF_DEltX4 DElt;
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    U32 s;
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1)
+    {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */
+    {
+        const U32 symbol = sortedSymbols[s].symbol;
+        const U32 weight = sortedSymbols[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 length = 1 << (sizeLog-nbBits);
+        const U32 start = rankVal[weight];
+        U32 i = start;
+        const U32 end = start + length;
+
+        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+        DElt.nbBits = (BYTE)(nbBits + consumed);
+        DElt.length = 2;
+        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+        rankVal[weight] += length;
+    }
+}
+
+typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++)
+    {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits)   /* enough room for a second symbol */
+        {
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        }
+        else
+        {
+            U32 i;
+            const U32 end = start + length;
+            HUF_DEltX4 DElt;
+
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits   = (BYTE)(nbBits);
+            DElt.length   = 1;
+            for (i = start; i < end; i++)
+                DTable[i] = DElt;
+        }
+        rankVal[weight] += length;
+    }
+}
+
+static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    const U32 memLog = DTable[0];
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize = ip[0];
+    void* ptr = DTable;
+    HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
+    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* not necessary, even though some analyzers complain */
+
+    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
+
+    /* Get start index of each weight */
+    {
+        U32 w, nextRankStart = 0;
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0-weight symbols at the end of the sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {
+        U32 s;
+        for (s=0; s<nbSymbols; s++)
+        {
+            U32 w = weightList[s];
+            U32 r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0-weight symbols; this is the beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {
+        const U32 minBits = tableLog+1 - maxW;
+        U32 nextRankVal = 0;
+        U32 w, consumed;
+        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
+        U32* rankVal0 = rankVal[0];
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankVal;
+            nextRankVal += rankStats[w] << (w+rescale);
+            rankVal0[w] = current;
+        }
+        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+        {
+            U32* rankValPtr = rankVal[consumed];
+            for (w = 1; w <= maxW; w++)
+            {
+                rankValPtr[w] = rankVal0[w] >> consumed;
+            }
+        }
+    }
+
+    HUF_fillDTableX4(dt, memLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else
+    {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+        {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+        }
+    }
+    return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
+    {
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+
+
+static size_t HUF_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+
+        const void* ptr = DTable;
+        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+        {
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* Generic decompression selector */
+/**********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
+    /* estimate decompression time */
+    U32 Q;
+    const U32 D256 = (U32)(dstSize >> 8);
+    U32 Dtime[3];
+    U32 algoNb = 0;
+    int n;
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    /* decoder timing evaluation */
+    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+    for (n=0; n<3; n++)
+        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+    if (Dtime[1] < Dtime[0]) algoNb = 1;
+
+    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
+    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
+    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
+}
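+
+/* Worked example for the selector above (illustrative numbers, not from the
+   original source) : a 128 KB block compressed to 64 KB gives
+   Q = 65536*16/131072 = 8 and D256 = 131072>>8 = 512.
+   Dtime[0] =  926 + 128*512 = 66462  (single-symbol)
+   Dtime[1] = 1613 +  75*512 = 40013, + (40013>>4) cache penalty = 42513  (double-symbol)
+   Since Dtime[1] < Dtime[0], HUF_decompress4X4() is selected. */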
+/*
+    zstd - standard compression library
+    Copyright (C) 2014-2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+*  MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*/
+#define ZSTD_MEMORY_USAGE 17
+
+/*!
+ * HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table :
+ * on the stack (0, fastest) or on the heap (1, requires malloc()).
+ * Note that the compression context is fairly large ; as a consequence, heap memory is recommended.
+ */
+#ifndef ZSTD_HEAPMODE
+#  define ZSTD_HEAPMODE 1
+#endif /* ZSTD_HEAPMODE */
+
+/*!
+*  LEGACY_SUPPORT :
+*  the decompressor can decode older formats (Zstd 0.1 and later)
+*/
+#ifndef ZSTD_LEGACY_SUPPORT
+#  define ZSTD_LEGACY_SUPPORT 1
+#endif
+
+
+/* *******************************************************
+*  Includes
+*********************************************************/
+#include <stdlib.h>      /* calloc */
+#include <string.h>      /* memcpy, memmove */
+#include <stdio.h>       /* debug : printf */
+
+
+/* *******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef __AVX2__
+#  include <immintrin.h>   /* AVX2 intrinsics */
+#endif
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#else
+#  define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#endif
+
+
+/* *******************************************************
+*  Constants
+*********************************************************/
+#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
+#define HASH_TABLESIZE (1 << HASH_LOG)
+#define HASH_MASK (HASH_TABLESIZE - 1)
+
+#define KNUTH 2654435761
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
+#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
+#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+#define WORKPLACESIZE (BLOCKSIZE*3)
+#define MINMATCH 4
+#define MLbits   7
+#define LLbits   6
+#define Offbits  5
+#define MaxML  ((1<<MLbits )-1)
+#define MaxLL  ((1<<LLbits )-1)
+#define MaxOff   31
+#define LitFSELog  11
+#define MLFSELog   10
+#define LLFSELog   10
+#define OffFSELog   9
+#define MAX(a,b) ((a)<(b)?(b):(a))
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define LITERAL_NOENTROPY 63
+#define COMMAND_NOENTROPY 7   /* to remove */
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize = 4;
+
+
+/* *******************************************************
+*  Memory operations
+**********************************************************/
+static void   ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+static void   ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do COPY8(op, ip) while (op < oend);
+}
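+
+/* Note : since ZSTD_wildcopy() rounds the copy up to 8-byte steps, destination
+   buffers need up to 8 spare bytes past the announced length ; this is why
+   ZSTD_DCtx.litBuffer below reserves an explicit wildcopy margin. */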
+
+
+/* **************************************
+*  Local structures
+****************************************/
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+typedef struct {
+    void* buffer;
+    U32*  offsetStart;
+    U32*  offset;
+    BYTE* offCodeStart;
+    BYTE* offCode;
+    BYTE* litStart;
+    BYTE* lit;
+    BYTE* litLengthStart;
+    BYTE* litLength;
+    BYTE* matchLengthStart;
+    BYTE* matchLength;
+    BYTE* dumpsStart;
+    BYTE* dumps;
+} seqStore_t;
+
+
+/* *************************************
+*  Error Management
+***************************************/
+/*! ZSTD_isError
+*   tells if a return value is an error code */
+static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+
+
+/* *************************************************************
+*   Decompression section
+***************************************************************/
+struct ZSTD_DCtx_s
+{
+    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+    void* previousDstEnd;
+    void* base;
+    size_t expected;
+    blockType_t bType;
+    U32 phase;
+    const BYTE* litPtr;
+    size_t litSize;
+    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+};   /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
+
+
+static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    BYTE headerFlags;
+    U32 cSize;
+
+    if (srcSize < 3) return ERROR(srcSize_wrong);
+
+    headerFlags = *in;
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
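+
+/* Example (illustrative) : the 3-byte block header {0x00, 0x01, 0x2A} decodes as
+   blockType = 0x00>>6 = bt_compressed, and
+   cSize = 0x2A + (0x01<<8) + ((0x00 & 7)<<16) = 298 bytes of compressed payload. */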
+
+static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+    @return : nb of bytes read from src, or an error code*/
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+                                const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+
+    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+    *maxDstSizePtr = litSize;
+    return litCSize + 5;
+}
+
+
+/** ZSTD_decodeLiteralsBlock
+    @return : nb of bytes read from src (< srcSize )*/
+static size_t ZSTD_decodeLiteralsBlock(void* ctx,
+                          const void* src, size_t srcSize)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+    const BYTE* const istart = (const BYTE* const)src;
+
+    /* any compressed block with literals segment must be at least this size */
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    switch(*istart & 3)
+    {
+    default:
+    case 0:
+        {
+            size_t litSize = BLOCKSIZE;
+            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, 8);
+            return readSize;   /* works if it's an error too */
+        }
+    case IS_RAW:
+        {
+            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
+            {
+                if (litSize > srcSize-3) return ERROR(corruption_detected);
+                memcpy(dctx->litBuffer, istart, litSize);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                memset(dctx->litBuffer + dctx->litSize, 0, 8);
+                return litSize+3;
+            }
+            /* direct reference into compressed stream */
+            dctx->litPtr = istart+3;
+            dctx->litSize = litSize;
+            return litSize+3;
+        }
+    case IS_RLE:
+        {
+            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+            memset(dctx->litBuffer, istart[3], litSize + 8);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            return 4;
+        }
+    }
+}
+
+
+static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+                         const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* ip = istart;
+    const BYTE* const iend = istart + srcSize;
+    U32 LLtype, Offtype, MLtype;
+    U32 LLlog, Offlog, MLlog;
+    size_t dumpsLength;
+
+    /* check */
+    if (srcSize < 5) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    *nbSeq = MEM_readLE16(ip); ip+=2;
+    LLtype  = *ip >> 6;
+    Offtype = (*ip >> 4) & 3;
+    MLtype  = (*ip >> 2) & 3;
+    if (*ip & 2)
+    {
+        dumpsLength  = ip[2];
+        dumpsLength += ip[1] << 8;
+        ip += 3;
+    }
+    else
+    {
+        dumpsLength  = ip[1];
+        dumpsLength += (ip[0] & 1) << 8;
+        ip += 2;
+    }
+    *dumpsPtr = ip;
+    ip += dumpsLength;
+    *dumpsLengthPtr = dumpsLength;
+
+    /* check */
+    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+    /* sequences */
+    {
+        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL and MaxOff */
+        size_t headerSize;
+
+        /* Build DTables */
+        switch(LLtype)
+        {
+        case bt_rle :
+            LLlog = 0;
+            FSE_buildDTable_rle(DTableLL, *ip++); break;
+        case bt_raw :
+            LLlog = LLbits;
+            FSE_buildDTable_raw(DTableLL, LLbits); break;
+        default :
+            {   U32 max = MaxLL;
+                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (LLlog > LLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableLL, norm, max, LLlog);
+        }   }
+
+        switch(Offtype)
+        {
+        case bt_rle :
+            Offlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+            break;
+        case bt_raw :
+            Offlog = Offbits;
+            FSE_buildDTable_raw(DTableOffb, Offbits); break;
+        default :
+            {   U32 max = MaxOff;
+                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (Offlog > OffFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableOffb, norm, max, Offlog);
+        }   }
+
+        switch(MLtype)
+        {
+        case bt_rle :
+            MLlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableML, *ip++); break;
+        case bt_raw :
+            MLlog = MLbits;
+            FSE_buildDTable_raw(DTableML, MLbits); break;
+        default :
+            {   U32 max = MaxML;
+                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (MLlog > MLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableML, norm, max, MLlog);
+    }   }   }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t offset;
+    size_t matchLength;
+} seq_t;
+
+typedef struct {
+    BIT_DStream_t DStream;
+    FSE_DState_t stateLL;
+    FSE_DState_t stateOffb;
+    FSE_DState_t stateML;
+    size_t prevOffset;
+    const BYTE* dumps;
+    const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+    size_t litLength;
+    size_t prevOffset;
+    size_t offset;
+    size_t matchLength;
+    const BYTE* dumps = seqState->dumps;
+    const BYTE* const de = seqState->dumpsEnd;
+
+    /* Literal length */
+    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+    prevOffset = litLength ? seq->offset : seqState->prevOffset;
+    seqState->prevOffset = seq->offset;
+    if (litLength == MaxLL)
+    {
+        U32 add = *dumps++;
+        if (add < 255) litLength += add;
+        else
+        {
+            litLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+            dumps += 3;
+        }
+        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+
+    /* Offset */
+    {
+        static const size_t offsetPrefix[MaxOff+1] = {  /* note : size_t faster than U32 */
+                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+        U32 offsetCode, nbBits;
+        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
+        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+        nbBits = offsetCode - 1;
+        if (offsetCode==0) nbBits = 0;   /* cmove */
+        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+        if (offsetCode==0) offset = prevOffset;   /* cmove */
+    }
+
+    /* MatchLength */
+    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+    if (matchLength == MaxML)
+    {
+        U32 add = *dumps++;
+        if (add < 255) matchLength += add;
+        else
+        {
+            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no pb : dumps is always followed by seq tables > 1 byte */
+            dumps += 3;
+        }
+        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+    matchLength += MINMATCH;
+
+    /* save result */
+    seq->litLength = litLength;
+    seq->offset = offset;
+    seq->matchLength = matchLength;
+    seqState->dumps = dumps;
+}
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+                                seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                BYTE* const base, BYTE* const oend)
+{
+    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
+    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
+    const BYTE* const ostart = op;
+    BYTE* const oLitEnd = op + sequence.litLength;
+    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_8 = oend-8;
+    const BYTE* const litEnd = *litPtr + sequence.litLength;
+
+    /* checks */
+    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
+    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
+    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */
+
+    /* copy Literals */
+    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = litEnd;   /* update for next sequence */
+
+    /* copy Match */
+    {
+        const BYTE* match = op - sequence.offset;
+
+        /* check */
+        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems kept by clang optimizer) */
+        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
+        if (match < base) return ERROR(corruption_detected);
+
+        /* close range match, overlap */
+        if (sequence.offset < 8)
+        {
+            const int dec64 = dec64table[sequence.offset];
+            op[0] = match[0];
+            op[1] = match[1];
+            op[2] = match[2];
+            op[3] = match[3];
+            match += dec32table[sequence.offset];
+            ZSTD_copy4(op+4, match);
+            match -= dec64;
+        }
+        else
+        {
+            ZSTD_copy8(op, match);
+        }
+        op += 8; match += 8;
+
+        if (oMatchEnd > oend-(16-MINMATCH))
+        {
+            if (op < oend_8)
+            {
+                ZSTD_wildcopy(op, match, oend_8 - op);
+                match += oend_8 - op;
+                op = oend_8;
+            }
+            while (op < oMatchEnd) *op++ = *match++;
+        }
+        else
+        {
+            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+        }
+    }
+
+    return oMatchEnd - ostart;
+}
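+
+/* Note on the overlap path above : when sequence.offset < 8, the match source
+   overlaps the bytes being written, so the first 8 output bytes are produced at
+   byte/4-byte granularity while dec32table/dec64table re-position the source
+   pointer onto already-written data ; after that, 8-byte copies are safe again. */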
+
+static size_t ZSTD_decompressSequences(
+                               void* ctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t errorCode, dumpsLength;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    int nbSeq;
+    const BYTE* dumps;
+    U32* DTableLL = dctx->LLTable;
+    U32* DTableML = dctx->MLTable;
+    U32* DTableOffb = dctx->OffTable;
+    BYTE* const base = (BYTE*) (dctx->base);
+
+    /* Build Decoding Tables */
+    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+                                      DTableLL, DTableML, DTableOffb,
+                                      ip, iend-ip);
+    if (ZSTD_isError(errorCode)) return errorCode;
+    ip += errorCode;
+
+    /* Regen sequences */
+    {
+        seq_t sequence;
+        seqState_t seqState;
+
+        memset(&sequence, 0, sizeof(sequence));
+        seqState.dumps = dumps;
+        seqState.dumpsEnd = dumps + dumpsLength;
+        seqState.prevOffset = sequence.offset = 4;
+        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
+        {
+            size_t oneSeqSize;
+            nbSeq--;
+            ZSTD_decodeSequence(&sequence, &seqState);
+            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* check if reached exact end */
+        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
+        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */
+
+        /* last literal segment */
+        {
+            size_t lastLLSize = litEnd - litPtr;
+            if (litPtr > litEnd) return ERROR(corruption_detected);
+            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+            if (op != litPtr) memmove(op, litPtr, lastLLSize);
+            op += lastLLSize;
+        }
+    }
+
+    return op-ostart;
+}
+
+
+static size_t ZSTD_decompressBlock(
+                            void* ctx,
+                            void* dst, size_t maxDstSize,
+                      const void* src, size_t srcSize)
+{
+    /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+
+    /* Decode literals sub-block */
+    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
+    if (ZSTD_isError(litCSize)) return litCSize;
+    ip += litCSize;
+    srcSize -= litCSize;
+
+    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t remainingSize = srcSize;
+    U32 magicNumber;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t decodedSize=0;
+        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet supported */
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            break;
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        if (ZSTD_isError(decodedSize)) return decodedSize;
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return op-ostart;
+}
+
+static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    ZSTD_DCtx ctx;
+    ctx.base = dst;
+    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
+}
+
+static size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+    U32 magicNumber;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+
+/*******************************
+*  Streaming Decompression API
+*******************************/
+
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+    dctx->expected = ZSTD_frameHeaderSize;
+    dctx->phase = 0;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    return 0;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+    if (dctx==NULL) return NULL;
+    ZSTD_resetDCtx(dctx);
+    return dctx;
+}
+
+static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+    free(dctx);
+    return 0;
+}
+
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+    return dctx->expected;
+}
+
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+    if (dst != ctx->previousDstEnd)  /* not contiguous */
+        ctx->base = dst;
+
+    /* Decompress : frame header */
+    if (ctx->phase == 0)
+    {
+        /* Check frame magic header */
+        U32 magicNumber = MEM_readLE32(src);
+        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
+        ctx->phase = 1;
+        ctx->expected = ZSTD_blockHeaderSize;
+        return 0;
+    }
+
+    /* Decompress : block header */
+    if (ctx->phase == 1)
+    {
+        blockProperties_t bp;
+        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+        if (ZSTD_isError(blockSize)) return blockSize;
+        if (bp.blockType == bt_end)
+        {
+            ctx->expected = 0;
+            ctx->phase = 0;
+        }
+        else
+        {
+            ctx->expected = blockSize;
+            ctx->bType = bp.blockType;
+            ctx->phase = 2;
+        }
+
+        return 0;
+    }
+
+    /* Decompress : block content */
+    {
+        size_t rSize;
+        switch(ctx->bType)
+        {
+        case bt_compressed:
+            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
+            break;
+        case bt_raw :
+            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet handled */
+            break;
+        case bt_end :   /* should never happen (filtered at phase 1) */
+            rSize = 0;
+            break;
+        default:
+            return ERROR(GENERIC);
+        }
+        ctx->phase = 1;
+        ctx->expected = ZSTD_blockHeaderSize;
+        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
+        return rSize;
+    }
+
+}
+
+
+/* wrapper layer */
+
+unsigned ZSTDv03_isError(size_t code)
+{
+    return ZSTD_isError(code);
+}
+
+size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
+                     const void* src, size_t compressedSize)
+{
+    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
+}
+
+size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    return ZSTD_findFrameCompressedSize(src, srcSize);
+}
+
+ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
+{
+    return (ZSTDv03_Dctx*)ZSTD_createDCtx();
+}
+
+size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)
+{
+    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)
+{
+    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)
+{
+    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
+}
+
+size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v03.h b/vendor/github.com/DataDog/zstd/zstd_v03.h
new file mode 100644
index 0000000..b4449e2
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v03.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V03_H_298734209782
+#define ZSTD_V03_H_298734209782
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stddef.h>   /* size_t */
+
+
+/* *************************************
+*  Simple one-step function
+***************************************/
+/**
+ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
+    compressedSize : is the exact source size
+    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
+                      It must be equal to or larger than originalSize, otherwise decompression will fail.
+    return : the number of bytes decompressed into the destination buffer (originalSize)
+             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
+*/
+size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
+                     const void* src, size_t compressedSize);
+
+/**
+ZSTDv03_findFrameCompressedSize() : get the source length of a ZSTD frame compliant with the v0.3.x format
+    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame
+             or an errorCode if it fails (which can be tested using ZSTDv03_isError())
+*/
+size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t compressedSize);
+
+/**
+ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
+*/
+unsigned ZSTDv03_isError(size_t code);
+
+
+/* *************************************
+*  Advanced functions
+***************************************/
+typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
+ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
+size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);
+
+size_t ZSTDv03_decompressDCtx(void* ctx,
+                              void* dst, size_t maxOriginalSize,
+                        const void* src, size_t compressedSize);
+
+/* *************************************
+*  Streaming functions
+***************************************/
+size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);
+
+size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
+size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions alternately.
+  ZSTDv03_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv03_decompressContinue().
+  ZSTDv03_decompressContinue() needs previous data blocks during decompression, so keep them accessible prior to the current block.
+  The result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTDv03_decompressContinue() has decoded some header.
+*/
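+
+/*
+  Illustrative sketch of the loop above (assumptions of this example : readExact()
+  is a user-supplied helper that fills srcBuf with exactly 'toRead' bytes of
+  compressed input, and op/oend delimit an already-allocated output buffer) :
+
+      ZSTDv03_Dctx* const dctx = ZSTDv03_createDCtx();
+      size_t toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);    // frame header comes first
+      while (toRead != 0)
+      {
+          size_t produced;
+          readExact(srcBuf, toRead);
+          produced = ZSTDv03_decompressContinue(dctx, op, (size_t)(oend-op), srcBuf, toRead);
+          if (ZSTDv03_isError(produced)) break;                 // corrupted input
+          op += produced;                                       // may be zero after a header
+          toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);       // zero once the frame ends
+      }
+      ZSTDv03_freeDCtx(dctx);
+*/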
+
+/* *************************************
+*  Prefix - version detection
+***************************************/
+#define ZSTDv03_magicNumber 0xFD2FB523   /* v0.3 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V03_H_298734209782 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.c b/vendor/github.com/DataDog/zstd/zstd_v04.c
new file mode 100644
index 0000000..fb6d1d4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v04.c
@@ -0,0 +1,3677 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include "zstd_v04.h"
+#include "error_private.h"
+
+
+/* ******************************************************************
+   mem.h
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/******************************************
+*  Includes
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include <string.h>    /* memcpy */
+
+
+/******************************************
+*  Compiler-specific
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+#  define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/****************************************************************
+*  Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/*-*************************************
+*  Debug
+***************************************/
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
+#  include <assert.h>
+#else
+#  ifndef assert
+#    define assert(condition) ((void)0)
+#  endif
+#endif
+
+#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
+
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
+#  include <stdio.h>
+extern int g_debuglog_enable;
+/* recommended values for ZSTD_DEBUG display levels :
+ * 1 : no display, enables assert() only
+ * 2 : reserved for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (*very* verbose) */
+#  define RAWLOG(l, ...) {                                      \
+                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
+                    fprintf(stderr, __VA_ARGS__);               \
+            }   }
+#  define DEBUGLOG(l, ...) {                                    \
+                if ((g_debuglog_enable) & (l<=ZSTD_DEBUG)) {    \
+                    fprintf(stderr, __FILE__ ": " __VA_ARGS__); \
+                    fprintf(stderr, " \n");                     \
+            }   }
+#else
+#  define RAWLOG(l, ...)      {}    /* disabled */
+#  define DEBUGLOG(l, ...)    {}    /* disabled */
+#endif
+
+
+/****************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable at the source level but violates the C standard.
+ *            It can generate buggy code on targets whose generated assembly depends on alignment.
+ *            But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates C standard on structure alignment.
+Only use if no other choice to achieve best performance on target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian())
+    {
+        MEM_write16(memPtr, val);
+    }
+    else
+    {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+    }
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+    {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+    }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+/*
+    zstd - standard compression library
+    Header File for static linking only
+*/
+#ifndef ZSTD_STATIC_H
+#define ZSTD_STATIC_H
+
+/* The objects defined in this file shall be considered experimental.
+ * They are not considered stable, as their prototypes may change in the future.
+ * You can use them for tests, to provide feedback, or if you can endure the risk of future changes.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Types
+***************************************/
+#define ZSTD_WINDOWLOG_MAX 26
+#define ZSTD_WINDOWLOG_MIN 18
+#define ZSTD_WINDOWLOG_ABSOLUTEMIN 11
+#define ZSTD_CONTENTLOG_MAX (ZSTD_WINDOWLOG_MAX+1)
+#define ZSTD_CONTENTLOG_MIN 4
+#define ZSTD_HASHLOG_MAX 28
+#define ZSTD_HASHLOG_MIN 4
+#define ZSTD_SEARCHLOG_MAX (ZSTD_CONTENTLOG_MAX-1)
+#define ZSTD_SEARCHLOG_MIN 1
+#define ZSTD_SEARCHLENGTH_MAX 7
+#define ZSTD_SEARCHLENGTH_MIN 4
+
+/** from faster to stronger */
+typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
+
+typedef struct
+{
+    U64 srcSize;       /* optional : tells how many bytes are present in the frame. Use 0 if not known. */
+    U32 windowLog;     /* largest match distance : larger == more compression, more memory needed during decompression */
+    U32 contentLog;    /* full search segment : larger == more compression, slower, more memory (useless for fast) */
+    U32 hashLog;       /* dispatch table : larger == more memory, faster */
+    U32 searchLog;     /* nb of searches : larger == more compression, slower */
+    U32 searchLength;  /* size of matches : larger == faster decompression, sometimes less compression */
+    ZSTD_strategy strategy;
+} ZSTD_parameters;
+
+typedef ZSTDv04_Dctx ZSTD_DCtx;
+
+/* *************************************
+*  Advanced functions
+***************************************/
+/** ZSTD_decompress_usingDict
+*   Same as ZSTD_decompressDCtx, using a Dictionary content as prefix
+*   Note : dict can be NULL, in which case, it's equivalent to ZSTD_decompressDCtx() */
+static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
+                                             void* dst, size_t maxDstSize,
+                                       const void* src, size_t srcSize,
+                                       const void* dict,size_t dictSize);
+
+
+/* **************************************
+*  Streaming functions (direct mode)
+****************************************/
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx);
+static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize);
+static void   ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
+
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+
+/**
+  Streaming decompression, bufferless mode
+
+  A ZSTD_DCtx object is required to track streaming operations.
+  Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
+  A ZSTD_DCtx object can be re-used multiple times. Use ZSTD_resetDCtx() to return to fresh status.
+
+  First operation is to retrieve frame parameters, using ZSTD_getFrameParams().
+  This function doesn't consume its input. It needs enough input data to properly decode the frame header.
+  The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
+  Result : 0 when successful ; it means the ZSTD_parameters structure has been filled.
+           >0 : there is not enough data in src ; the value is the expected size needed to decode the header.
+           an errorCode, which can be tested using ZSTD_isError() (for example, if it's not a ZSTD header)
+
+  Then, you can optionally insert a dictionary.
+  This operation must mimic the compressor behavior, otherwise decompression will fail or be corrupted.
+
+  Then it's possible to start decompression.
+  Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() requires this exact number of bytes, or it will fail.
+  ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowLog).
+  They should preferably be located contiguously, prior to the current block. Alternatively, a ring buffer is also possible.
+
+  @result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+
+  A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+*/
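+
+/*
+  Illustrative sketch of the sequence above (assumptions of this example : 'ip'
+  walks a buffer holding one complete frame, and 'op'/'oend' delimit a destination
+  buffer sized according to params.windowLog) :
+
+      ZSTD_parameters params;
+      size_t toRead, result = ZSTD_getFrameParams(&params, ip, inputSize);
+      if (result != 0) { }                       // error, or more header bytes needed
+      ZSTD_resetDCtx(dctx);                      // dctx obtained from ZSTD_createDCtx()
+      while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0)
+      {
+          size_t const produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend-op), ip, toRead);
+          if (ZSTD_isError(produced)) break;     // corrupted input
+          ip += toRead;
+          op += produced;                        // may be zero after a header
+      }
+*/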
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+
+#endif  /* ZSTD_STATIC_H */
+
+
+/*
+    zstd_internal - common functions to include
+    Header File for include
+*/
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/* *************************************
+*  Common constants
+***************************************/
+#define ZSTD_MAGICNUMBER 0xFD2FB524   /* v0.4 */
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
+
+static const size_t ZSTD_blockHeaderSize = 3;
+static const size_t ZSTD_frameHeaderSize_min = 5;
+#define ZSTD_frameHeaderSize_max 5         /* define, for static allocation */
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define IS_RAW BIT0
+#define IS_RLE BIT1
+
+#define MINMATCH 4
+#define REPCODE_STARTVALUE 4
+
+#define MLbits   7
+#define LLbits   6
+#define Offbits  5
+#define MaxML  ((1<<MLbits) - 1)
+#define MaxLL  ((1<<LLbits) - 1)
+#define MaxOff ((1<<Offbits)- 1)
+#define MLFSELog   10
+#define LLFSELog   10
+#define OffFSELog   9
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
+#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
+
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+
+/* ******************************************
+*  Shared functions to include for inlining
+********************************************/
+static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
+static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   header file
+****************************************************************** */
+#ifndef FSE_H
+#define FSE_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+*  Includes
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+
+
+/* *****************************************
+*  FSE simple functions
+******************************************/
+static size_t FSE_decompress(void* dst,  size_t maxDstSize,
+                const void* cSrc, size_t cSrcSize);
+/*!
+FSE_decompress():
+    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'maxDstSize'.
+    return : size of regenerated data (<= maxDstSize)
+             or an error code, which can be tested using FSE_isError()
+
+    ** Important ** : FSE_decompress() decompresses neither non-compressible nor RLE data !!!
+    Why ? : making this distinction requires a header.
+    Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+
+
+/* *****************************************
+*  Tool functions
+******************************************/
+/* Error Management */
+static unsigned    FSE_isError(size_t code);        /* tells if a return value is an error code */
+
+
+
+/* *****************************************
+*  FSE detailed API
+******************************************/
+/*!
+FSE_compress() does the following:
+1. count symbol occurrence from source[] into table count[]
+2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
+3. save normalized counters to memory buffer using writeNCount()
+4. build encoding table 'CTable' from normalized counters
+5. encode the data stream using encoding table 'CTable'
+
+FSE_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+
+/* *** DECOMPRESSION *** */
+
+/*!
+FSE_readNCount():
+   Read compactly saved 'normalizedCounter' from 'rBuffer'.
+   return : size read from 'rBuffer'
+            or an errorCode, which can be tested using FSE_isError()
+            maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+static  size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*!
+Constructor and Destructor of type FSE_DTable
+    Note that its size depends on 'tableLog' */
+typedef unsigned FSE_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+
+/*!
+FSE_buildDTable():
+   Builds 'dt', which must be already allocated, using FSE_createDTable()
+   return : 0,
+            or an errorCode, which can be tested using FSE_isError() */
+static size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*!
+FSE_decompress_usingDTable():
+   Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt'
+   into 'dst' which must be already allocated.
+   return : size of regenerated data (necessarily <= maxDstSize)
+            or an errorCode, which can be tested using FSE_isError() */
+static  size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If block is uncompressed, use memcpy() instead
+ If block is a single repeated byte, use memset() instead )
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
+This is performed by the function FSE_buildDTable().
+The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSE_isError().
+
+'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable().
+'cSrcSize' must be strictly correct, otherwise decompression will fail.
+FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize).
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
+*/
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSE_H */
+
+
+/* ******************************************************************
+   bitstream
+   Part of NewGen Entropy library
+   header file (to include)
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which highly benefit from being inlined.
+*  Since link-time-optimization is not available for all compilers,
+*  these functions are defined into a .h to be included.
+*/
+
+/**********************************************
+*  bitStream decompression API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BIT_DStream_t;
+
+typedef enum { BIT_DStream_unfinished = 0,
+               BIT_DStream_endOfBuffer = 1,
+               BIT_DStream_completed = 2,
+               BIT_DStream_overflow = 3 } BIT_DStream_status;  /* result of BIT_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
+
+
+
+
+/******************************************
+*  unsafe API
+******************************************/
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/****************************************************************
+*  Helper functions
+****************************************************************/
+MEM_STATIC unsigned BIT_highbit32 (U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    unsigned r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
+
+
+/**********************************************************
+* bitStream decoding
+**********************************************************/
+
+/*!BIT_initDStream
+*  Initialize a BIT_DStream_t.
+*  @bitD : a pointer to an already allocated BIT_DStream_t structure
+*  @srcBuffer must point at the beginning of a bitStream
+*  @srcSize must be the exact size of the bitStream
+*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(size_t))   /* normal case */
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+    }
+    else
+    {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8; /* fall-through */
+            default: break;
+        }
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
+        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BIT_lookBitsFast :
+*   unsafe version; only works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BIT_lookBits(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*!BIT_readBitsFast :
+*  unsafe version; only works only if nbBits >= 1 */
+MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BIT_lookBitsFast(bitD, nbBits);
+    BIT_skipBits(bitD, nbBits);
+    return value;
+}
+
+MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
+        return BIT_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
+    {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BIT_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start)
+    {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
+        return BIT_DStream_completed;
+    }
+    {
+        U32 nbBytes = bitD->bitsConsumed >> 3;
+        BIT_DStream_status result = BIT_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start)
+        {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BIT_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BIT_endOfDStream
+*   @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+
+
+
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSE_STATIC_H
+#define FSE_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSE_NCOUNTBOUND 512
+#define FSE_BLOCKBOUND(size) (size + (size>>7))
+#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
+#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
+#define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
+
+
+/* *****************************************
+*  FSE advanced API
+*******************************************/
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
+/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
+/* build a fake FSE_DTable, designed to always generate the same symbolValue */
+
+
+
+/* *****************************************
+*  FSE symbol decompression API
+*******************************************/
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSE_DState_t;
+
+
+static void     FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
+
+static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+
+static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
+
+
+/* *****************************************
+*  FSE unsafe API
+*******************************************/
+static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+*  Implementation of inlined functions
+*******************************************/
+/* decompression */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSE_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSE_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    memcpy(&DTableH, dt, sizeof(DTableH));
+    DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
+    BIT_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32  nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BIT_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
+{
+    const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32 nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BIT_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSE_STATIC_H */
+
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSE_COMMONDEFS_ONLY
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSE_MAX_MEMORY_USAGE 14
+#define FSE_DEFAULT_MEMORY_USAGE 13
+
+/*!FSE_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSE_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSE_FUNCTION_TYPE BYTE
+#define FSE_FUNCTION_EXTENSION
+#define FSE_DECODE_TYPE FSE_decode_t
+
+
+#endif   /* !FSE_COMMONDEFS_ONLY */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+*  Dependencies
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSE_MAX_TABLELOG  (FSE_MAX_MEMORY_USAGE-2)
+#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
+#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
+#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
+#define FSE_MIN_TABLELOG 5
+
+#define FSE_TABLELOG_ABSOLUTE_MAX 15
+#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
+#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/* **************************************************************
+*  Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
+
+
+/*-**************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included
+  for type-specific functions (template emulation in C)
+  Objective is to write these functions only once, for improved maintenance
+*/
+
+/* safety checks */
+#ifndef FSE_FUNCTION_EXTENSION
+#  error "FSE_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSE_FUNCTION_TYPE
+#  error "FSE_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSE_CAT(X,Y) X##Y
+#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
+
+
+static size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    FSE_DTableHeader DTableH;
+    void* const tdPtr = dt+1;   /* because dt is unsigned, 32-bits aligned on 32-bits */
+    FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+    const U32 tableSize = 1 << tableLog;
+    const U32 tableMask = tableSize-1;
+    const U32 step = FSE_tableStep(tableSize);
+    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+    U32 position = 0;
+    U32 highThreshold = tableSize-1;
+    const S16 largeLimit= (S16)(1 << (tableLog-1));
+    U32 noLarge = 1;
+    U32 s;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    DTableH.tableLog = (U16)tableLog;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        if (normalizedCounter[s]==-1)
+        {
+            tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+            symbolNext[s] = 1;
+        }
+        else
+        {
+            if (normalizedCounter[s] >= largeLimit) noLarge=0;
+            symbolNext[s] = normalizedCounter[s];
+        }
+    }
+
+    /* Spread symbols */
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        int i;
+        for (i=0; i<normalizedCounter[s]; i++)
+        {
+            tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+            position = (position + step) & tableMask;
+            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }
+    }
+
+    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+    /* Build Decoding table */
+    {
+        U32 i;
+        for (i=0; i<tableSize; i++)
+        {
+            FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
+            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+        }
+    }
+
+    DTableH.fastMode = (U16)noLarge;
+    memcpy(dt, &DTableH, sizeof(DTableH));
+    return 0;
+}
+
+
+#ifndef FSE_COMMONDEFS_ONLY
+/******************************************
+*  FSE helper functions
+******************************************/
+static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
+
+
+/****************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static short FSE_abs(short a)
+{
+    return a<0 ? -a : a;
+}
+
+static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) && (charnum<=*maxSVPtr))
+    {
+        if (previous0)
+        {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF)
+            {
+                n0+=24;
+                if (ip < iend-5)
+                {
+                    ip+=2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                }
+                else
+                {
+                    bitStream >>= 16;
+                    bitCount+=16;
+                }
+            }
+            while ((bitStream & 3) == 3)
+            {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+            {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
+        {
+            const short max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max)
+            {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            }
+            else
+            {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSE_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold)
+            {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            {
+                if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
+                {
+                    ip += bitCount>>3;
+                    bitCount &= 7;
+                }
+                else
+                {
+                    bitCount -= (int)(8 * (iend - 4 - ip));
+                    ip = iend - 4;
+                }
+                bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+            }
+        }
+    }
+    if (remaining != 1) return ERROR(GENERIC);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+    return ip-istart;
+}
+
+
+/*********************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<=maxSymbolValue; s++)
+    {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSE_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BIT_DStream_t bitD;
+    FSE_DState_t state1;
+    FSE_DState_t state2;
+    size_t errorCode;
+
+    /* Init */
+    errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressed Size */
+    if (FSE_isError(errorCode)) return errorCode;
+
+    FSE_initDState(&state1, &bitD, dt);
+    FSE_initDState(&state2, &bitD, dt);
+
+#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+    /* 4 symbols per loop */
+    for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
+    {
+        op[0] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[1] = FSE_GETSYMBOL(&state2);
+
+        if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSE_GETSYMBOL(&state1);
+
+        if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BIT_reloadDStream(&bitD);
+
+        op[3] = FSE_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+    while (1)
+    {
+        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state1);
+
+        if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
+            break;
+
+        *op++ = FSE_GETSYMBOL(&state2);
+    }
+
+    /* end ? */
+    if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
+        return op-ostart;
+
+    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
+
+    return ERROR(corruption_detected);
+}
+
+
+static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSE_DTable* dt)
+{
+    FSE_DTableHeader DTableH;
+    U32 fastMode;
+
+    memcpy(&DTableH, dt, sizeof(DTableH));
+    fastMode = DTableH.fastMode;
+
+    /* select fast mode (static) */
+    if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSE_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    size_t errorCode;
+
+    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
+
+    /* normal FSE decoding mode */
+    errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSE_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
+    if (FSE_isError(errorCode)) return errorCode;
+
+    /* always return, even if it is an error code */
+    return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif   /* FSE_COMMONDEFS_ONLY */
+
+
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   header file
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef HUFF0_H
+#define HUFF0_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+*  Dependency
+******************************************/
+#include <stddef.h>    /* size_t */
+
+
+/* ****************************************
+*  Huff0 simple functions
+******************************************/
+static size_t HUF_decompress(void* dst,  size_t dstSize,
+                const void* cSrc, size_t cSrcSize);
+/*!
+HUF_decompress():
+    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstSize'.
+    'dstSize' must be the exact size of original (uncompressed) data.
+    Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows size to regenerate.
+    @return : size of regenerated data (== dstSize)
+              or an error code, which can be tested using HUF_isError()
+*/
+
+
+/* ****************************************
+*  Tool functions
+******************************************/
+/* Error Management */
+static unsigned    HUF_isError(size_t code);        /* tells if a return value is an error code */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* HUFF0_H */
+
+
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef HUFF0_STATIC_H
+#define HUFF0_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* ****************************************
+*  Static allocation macros
+******************************************/
+/* static allocation of Huff0's DTable */
+#define HUF_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))  /* nb Cells; use unsigned short for X2, unsigned int for X4 */
+#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+        unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
+
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
+
+
+/* ****************************************
+*  Huff0 detailed API
+******************************************/
+/*!
+HUF_decompress() does the following:
+1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
+2. build Huffman table from save, using HUF_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
+
+*/
+static size_t HUF_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
+static size_t HUF_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
+
+static size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFF0_STATIC_H */
+
+
+
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+
+/* **************************************************************
+*  Constants
+****************************************************************/
+#define HUF_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_DEFAULT_TABLELOG  HUF_MAX_TABLELOG   /* tableLog by default, when not specified */
+#define HUF_MAX_SYMBOL_VALUE 255
+#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
+#  error "HUF_MAX_TABLELOG is too large !"
+#endif
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
+#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+
+/*-*******************************************************
+*  Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2;   /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4;  /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUF_readStats
+    Read compact Huffman tree, saved by HUF_writeCTable
+    @huffWeight : destination buffer
+    @return : size read from `src`
+*/
+static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                            U32* nbSymbolsPtr, U32* tableLogPtr,
+                            const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    U32 tableLog;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+    U32 n;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzer complain ... */
+
+    if (iSize >= 128)  /* special header */
+    {
+        if (iSize >= (242))   /* RLE */
+        {
+            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, hwSize);
+            iSize = 0;
+        }
+        else   /* Incompressible */
+        {
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+            if (oSize >= hwSize) return ERROR(corruption_detected);
+            ip += 1;
+            for (n=0; n<oSize; n+=2)
+            {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+            }
+        }
+    }
+    else  /* header compressed with FSE (normal case) */
+    {
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+    weightTotal = 0;
+    for (n=0; n<oSize; n++)
+    {
+        if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+        rankStats[huffWeight[n]]++;
+        weightTotal += (1 << huffWeight[n]) >> 1;
+    }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    tableLog = BIT_highbit32(weightTotal) + 1;
+    if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+    {
+        U32 total = 1 << tableLog;
+        U32 rest = total - weightTotal;
+        U32 verif = 1 << BIT_highbit32(rest);
+        U32 lastWeight = BIT_highbit32(rest) + 1;
+        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+        huffWeight[oSize] = (BYTE)lastWeight;
+        rankStats[lastWeight]++;
+    }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    *tableLogPtr = tableLog;
+    return iSize+1;
+}
+
+
+/**************************/
+/* single-symbol decoding */
+/**************************/
+
+static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    size_t iSize;
+    U32 nbSymbols = 0;
+    U32 n;
+    U32 nextRankStart;
+    void* const dtPtr = DTable + 1;
+    HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzer complain ... */
+
+    iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
+    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
+
+    /* Prepare ranks */
+    nextRankStart = 0;
+    for (n=1; n<=tableLog; n++)
+    {
+        U32 current = nextRankStart;
+        nextRankStart += (rankVal[n] << (n-1));
+        rankVal[n] = current;
+    }
+
+    /* fill DTable */
+    for (n=0; n<nbSymbols; n++)
+    {
+        const U32 w = huffWeight[n];
+        const U32 length = (1 << w) >> 1;
+        U32 i;
+        HUF_DEltX2 D;
+        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+        for (i = rankVal[w]; i < rankVal[w] + length; i++)
+            dt[i] = D;
+        rankVal[w] += length;
+    }
+
+    return iSize;
+}
+
+static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
+{
+        const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+        const BYTE c = dt[val].byte;
+        BIT_skipBits(Dstream, dt[val].nbBits);
+        return c;
+}
+
+#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
+    {
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+
+static size_t HUF_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+        {
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
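+
+/* Input layout of the 4-streams format, as parsed above (illustration only) :
+
+   |--2B--|--2B--|--2B--|- len1 B -|- len2 B -|- len3 B -|-  rest  -|
+   | len1 | len2 | len3 | stream1  | stream2  | stream3  | stream4  |
+
+   The 6-byte jump table stores the first three stream sizes as little-endian
+   U16 values ; length4 is deduced as cSrcSize - 6 - (len1+len2+len3). Each
+   stream regenerates one quarter of dst ((dstSize+3)/4 bytes), the last
+   stream producing the remainder up to oend. */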
+
+
+static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+    size_t errorCode;
+
+    errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/***************************/
+/* double-symbols decoding */
+/***************************/
+
+static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUF_DEltX4 DElt;
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    U32 s;
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1)
+    {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++)   /* note : sortedSymbols already skipped */
+    {
+        const U32 symbol = sortedSymbols[s].symbol;
+        const U32 weight = sortedSymbols[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 length = 1 << (sizeLog-nbBits);
+        const U32 start = rankVal[weight];
+        U32 i = start;
+        const U32 end = start + length;
+
+        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+        DElt.nbBits = (BYTE)(nbBits + consumed);
+        DElt.length = 2;
+        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+        rankVal[weight] += length;
+    }
+}
+
+typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++)
+    {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits)   /* enough room for a second symbol */
+        {
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        }
+        else
+        {
+            U32 i;
+            const U32 end = start + length;
+            HUF_DEltX4 DElt;
+
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits   = (BYTE)(nbBits);
+            DElt.length   = 1;
+            for (i = start; i < end; i++)
+                DTable[i] = DElt;
+        }
+        rankVal[weight] += length;
+    }
+}
+
+static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
+    sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
+    U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+    U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    const U32 memLog = DTable[0];
+    size_t iSize;
+    void* dtPtr = DTable;
+    HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1;
+
+    HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
+    if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* not necessary, even though some analyzers complain ... */
+
+    iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUF_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--)
+        { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
+
+    /* Get start index of each weight */
+    {
+        U32 w, nextRankStart = 0;
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of sorted list*/
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {
+        U32 s;
+        for (s=0; s<nbSymbols; s++)
+        {
+            U32 w = weightList[s];
+            U32 r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {
+        const U32 minBits = tableLog+1 - maxW;
+        U32 nextRankVal = 0;
+        U32 w, consumed;
+        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
+        U32* rankVal0 = rankVal[0];
+        for (w=1; w<=maxW; w++)
+        {
+            U32 current = nextRankVal;
+            nextRankVal += rankStats[w] << (w+rescale);
+            rankVal0[w] = current;
+        }
+        for (consumed = minBits; consumed <= memLog - minBits; consumed++)
+        {
+            U32* rankValPtr = rankVal[consumed];
+            for (w = 1; w <= maxW; w++)
+            {
+                rankValPtr[w] = rankVal0[w] >> consumed;
+            }
+        }
+    }
+
+    HUF_fillDTableX4(dt, memLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    return iSize;
+}
+
+
+static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BIT_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BIT_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
+    else
+    {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
+        {
+            BIT_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+        }
+    }
+    return 1;
+}
+
+
+#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
+    {
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUF_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+static size_t HUF_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BIT_DStream_t bitD1;
+        BIT_DStream_t bitD2;
+        BIT_DStream_t bitD3;
+        BIT_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BIT_initDStream(&bitD1, istart1, length1);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD2, istart2, length2);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD3, istart3, length3);
+        if (HUF_isError(errorCode)) return errorCode;
+        errorCode = BIT_initDStream(&bitD4, istart4, length4);
+        if (HUF_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
+        {
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUF_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUF_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/**********************************/
+/* Generic decompression selector */
+/**********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
+    /* estimate decompression time */
+    U32 Q;
+    const U32 D256 = (U32)(dstSize >> 8);
+    U32 Dtime[3];
+    U32 algoNb = 0;
+    int n;
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    /* decoder timing evaluation */
+    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+    for (n=0; n<3; n++)
+        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+    if (Dtime[1] < Dtime[0]) algoNb = 1;
+
+    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+    //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
+    //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
+    //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
+}
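+
+/* Worked example of the selector (illustration only, numbers taken from the
+   algoTime table above) : for cSrcSize = 40960 and dstSize = 131072 (a ~31%
+   ratio), Q = 40960*16/131072 = 5 and D256 = 131072>>8 = 512.
+   Dtime[0] = 714 + 128*512 = 66250 ; Dtime[1] = 1418 + 74*512 = 39306.
+   After the cache-eviction penalty, Dtime[1] += 39306>>4 => 41762, still
+   below Dtime[0], so HUF_decompress4X4 (double-symbols) is selected. */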
+
+
+
+#endif   /* ZSTD_CCOMMON_H_MODULE */
+
+
+/*
+    zstd - decompression module for v0.4 legacy format
+    Copyright (C) 2015-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTD_decompress() will allocate memory,
+ * in memory stack (0), or in memory heap (1, requires malloc())
+ */
+#ifndef ZSTD_HEAPMODE
+#  define ZSTD_HEAPMODE 1
+#endif
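+
+/* Example override (hypothetical build flag) : compiling with
+   `-DZSTD_HEAPMODE=0`, or defining the macro before this point,
+
+   #define ZSTD_HEAPMODE 0
+
+   makes ZSTD_decompress() place its working context on the stack instead,
+   avoiding malloc() at the cost of a larger stack frame. */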
+
+
+/* *******************************************************
+*  Includes
+*********************************************************/
+#include <stdlib.h>      /* calloc */
+#include <string.h>      /* memcpy, memmove */
+#include <stdio.h>       /* debug : printf */
+
+
+/* *******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#endif
+
+
+/* *************************************
+*  Local types
+***************************************/
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+
+/* *******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/* *************************************
+*  Error Management
+***************************************/
+
+/*! ZSTD_isError
+*   tells if a return value is an error code */
+static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
+
+
+/* *************************************************************
+*   Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage;
+
+struct ZSTDv04_Dctx_s
+{
+    U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
+    U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
+    U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
+    const void* previousDstEnd;
+    const void* base;
+    const void* vBase;
+    const void* dictEnd;
+    size_t expected;
+    size_t headerSize;
+    ZSTD_parameters params;
+    blockType_t bType;
+    ZSTD_dStage stage;
+    const BYTE* litPtr;
+    size_t litSize;
+    BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
+    BYTE headerBuffer[ZSTD_frameHeaderSize_max];
+};  /* typedef'd to ZSTD_DCtx within "zstd_static.h" */
+
+static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
+{
+    dctx->expected = ZSTD_frameHeaderSize_min;
+    dctx->stage = ZSTDds_getFrameHeaderSize;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    dctx->vBase = NULL;
+    dctx->dictEnd = NULL;
+    return 0;
+}
+
+static ZSTD_DCtx* ZSTD_createDCtx(void)
+{
+    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
+    if (dctx==NULL) return NULL;
+    ZSTD_resetDCtx(dctx);
+    return dctx;
+}
+
+static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
+{
+    free(dctx);
+    return 0;
+}
+
+
+/* *************************************************************
+*   Decompression section
+***************************************************************/
+/** ZSTD_decodeFrameHeader_Part1
+*   decode the 1st part of the Frame Header, which gives the full Frame Header size.
+*   srcSize must be == ZSTD_frameHeaderSize_min
+*   @return : the full size of the Frame Header */
+static size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize)
+{
+    U32 magicNumber;
+    if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
+    zc->headerSize = ZSTD_frameHeaderSize_min;
+    return zc->headerSize;
+}
+
+
+static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
+{
+    U32 magicNumber;
+    if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max;
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
+    memset(params, 0, sizeof(*params));
+    params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
+    if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits */
+    return 0;
+}
+
+/** ZSTD_decodeFrameHeader_Part2
+*   decode the full Frame Header
+*   srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1
+*   @return : 0, or an error code, which can be tested using ZSTD_isError() */
+static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize)
+{
+    size_t result;
+    if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
+    result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
+    if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
+    return result;
+}
+
+
+static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    BYTE headerFlags;
+    U32 cSize;
+
+    if (srcSize < 3) return ERROR(srcSize_wrong);
+
+    headerFlags = *in;
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
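+
+/* Block header layout (3 bytes, as parsed above ; illustration only) :
+   in[0] bits 6-7 hold the blockType ; the block size is a 19-bit value
+   stored big-endian style as ((in[0]&7)<<16) + (in[1]<<8) + in[2].
+   For bt_rle the size describes the regenerated length (origSize) and the
+   compressed payload is a single byte ; for bt_end the payload is empty. */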
+
+static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+/** ZSTD_decompressLiterals
+    @return : nb of bytes read from src, or an error code */
+static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
+                                const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+
+    const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+    const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+
+    if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
+    if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
+
+    if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
+
+    *maxDstSizePtr = litSize;
+    return litCSize + 5;
+}
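+
+/* Literals header layout implied by the shifts above (illustration only) :
+   the 5-byte header packs two 19-bit fields, litSize in bits 2-20 and
+   litCSize in bits 21-39, hence the two overlapping little-endian reads
+   at ip and ip+2. The Huffman-compressed literals start at ip+5. */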
+
+
+/** ZSTD_decodeLiteralsBlock
+    @return : nb of bytes read from src (< srcSize) */
+static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    const BYTE* const istart = (const BYTE*) src;
+
+    /* any compressed block with a literals segment must be at least this size */
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    switch(*istart & 3)
+    {
+    /* compressed */
+    case 0:
+        {
+            size_t litSize = BLOCKSIZE;
+            const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, 8);
+            return readSize;   /* works if it's an error too */
+        }
+    case IS_RAW:
+        {
+            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+            if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
+            {
+                if (litSize > srcSize-3) return ERROR(corruption_detected);
+                memcpy(dctx->litBuffer, istart, litSize);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                memset(dctx->litBuffer + dctx->litSize, 0, 8);
+                return litSize+3;
+            }
+            /* direct reference into compressed stream */
+            dctx->litPtr = istart+3;
+            dctx->litSize = litSize;
+            return litSize+3;
+        }
+    case IS_RLE:
+        {
+            const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
+            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+            memset(dctx->litBuffer, istart[3], litSize + 8);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            return 4;
+        }
+    default:
+        return ERROR(corruption_detected);   /* forbidden nominal case */
+    }
+}
+
+
+static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+                         FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
+                         const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* ip = istart;
+    const BYTE* const iend = istart + srcSize;
+    U32 LLtype, Offtype, MLtype;
+    U32 LLlog, Offlog, MLlog;
+    size_t dumpsLength;
+
+    /* check */
+    if (srcSize < 5) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    *nbSeq = MEM_readLE16(ip); ip+=2;
+    LLtype  = *ip >> 6;
+    Offtype = (*ip >> 4) & 3;
+    MLtype  = (*ip >> 2) & 3;
+    if (*ip & 2)
+    {
+        dumpsLength  = ip[2];
+        dumpsLength += ip[1] << 8;
+        ip += 3;
+    }
+    else
+    {
+        dumpsLength  = ip[1];
+        dumpsLength += (ip[0] & 1) << 8;
+        ip += 2;
+    }
+    *dumpsPtr = ip;
+    ip += dumpsLength;
+    *dumpsLengthPtr = dumpsLength;
+
+    /* check */
+    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+    /* sequences */
+    {
+        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL >= MaxOff */
+        size_t headerSize;
+
+        /* Build DTables */
+        switch(LLtype)
+        {
+        case bt_rle :
+            LLlog = 0;
+            FSE_buildDTable_rle(DTableLL, *ip++); break;
+        case bt_raw :
+            LLlog = LLbits;
+            FSE_buildDTable_raw(DTableLL, LLbits); break;
+        default :
+            {   U32 max = MaxLL;
+                headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (LLlog > LLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableLL, norm, max, LLlog);
+        }   }
+
+        switch(Offtype)
+        {
+        case bt_rle :
+            Offlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+            break;
+        case bt_raw :
+            Offlog = Offbits;
+            FSE_buildDTable_raw(DTableOffb, Offbits); break;
+        default :
+            {   U32 max = MaxOff;
+                headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (Offlog > OffFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableOffb, norm, max, Offlog);
+        }   }
+
+        switch(MLtype)
+        {
+        case bt_rle :
+            MLlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+            FSE_buildDTable_rle(DTableML, *ip++); break;
+        case bt_raw :
+            MLlog = MLbits;
+            FSE_buildDTable_raw(DTableML, MLbits); break;
+        default :
+            {   U32 max = MaxML;
+                headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
+                if (FSE_isError(headerSize)) return ERROR(GENERIC);
+                if (MLlog > MLFSELog) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSE_buildDTable(DTableML, norm, max, MLlog);
+    }   }   }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t offset;
+    size_t matchLength;
+} seq_t;
+
+typedef struct {
+    BIT_DStream_t DStream;
+    FSE_DState_t stateLL;
+    FSE_DState_t stateOffb;
+    FSE_DState_t stateML;
+    size_t prevOffset;
+    const BYTE* dumps;
+    const BYTE* dumpsEnd;
+} seqState_t;
+
+
+static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+    size_t litLength;
+    size_t prevOffset;
+    size_t offset;
+    size_t matchLength;
+    const BYTE* dumps = seqState->dumps;
+    const BYTE* const de = seqState->dumpsEnd;
+
+    /* Literal length */
+    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
+    prevOffset = litLength ? seq->offset : seqState->prevOffset;
+    if (litLength == MaxLL) {
+        U32 add = *dumps++;
+        if (add < 255) litLength += add;
+        else {
+            litLength = dumps[0] + (dumps[1]<<8) + (dumps[2]<<16);
+            dumps += 3;
+        }
+        if (dumps > de) { litLength = MaxLL+255; }  /* late correction, to avoid using uninitialized memory */
+        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+
+    /* Offset */
+    {   static const U32 offsetPrefix[MaxOff+1] = {
+                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+        U32 offsetCode, nbBits;
+        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
+        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+        nbBits = offsetCode - 1;
+        if (offsetCode==0) nbBits = 0;   /* cmove */
+        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
+        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
+        if (offsetCode==0) offset = prevOffset;   /* cmove */
+        if (offsetCode | !litLength) seqState->prevOffset = seq->offset;   /* cmove */
+    }
+
+    /* MatchLength */
+    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+    if (matchLength == MaxML) {
+        U32 add = *dumps++;
+        if (add < 255) matchLength += add;
+        else {
+            matchLength = dumps[0] + (dumps[1]<<8) + (dumps[2]<<16);
+            dumps += 3;
+        }
+        if (dumps > de) { matchLength = MaxML+255; }  /* late correction, to avoid using uninitialized memory */
+        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+    matchLength += MINMATCH;
+
+    /* save result */
+    seq->litLength = litLength;
+    seq->offset = offset;
+    seq->matchLength = matchLength;
+    seqState->dumps = dumps;
+}
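+
+/* Length-extension scheme read above (summary) : a decoded symbol equal to
+   MaxLL (resp. MaxML) flags a long length ; the next dumps byte b either
+   adds to the base (b < 255) or announces a raw 3-byte little-endian length.
+   Match lengths additionally get MINMATCH added at the end. */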
+
+
+static size_t ZSTD_execSequence(BYTE* op,
+                                BYTE* const oend, seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+    BYTE* const oLitEnd = op + sequence.litLength;
+    const size_t sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_8 = oend-8;
+    const BYTE* const litEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
+    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
+    if (litEnd > litLimit) return ERROR(corruption_detected);   /* risk read beyond lit buffer */
+
+    /* copy Literals */
+    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = litEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base))
+    {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase))
+            return ERROR(corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd)
+        {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {
+            size_t length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_8 || sequence.matchLength < MINMATCH) {
+              while (op < oMatchEnd) *op++ = *match++;
+              return sequenceLength;
+            }
+        }
+    }
+    /* Requirement: op <= oend_8 */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        const int sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTD_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTD_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH))
+    {
+        if (op < oend_8)
+        {
+            ZSTD_wildcopy(op, match, oend_8 - op);
+            match += oend_8 - op;
+            op = oend_8;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    }
+    else
+    {
+        ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+
+static size_t ZSTD_decompressSequences(
+                               ZSTD_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t errorCode, dumpsLength;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    int nbSeq;
+    const BYTE* dumps;
+    U32* DTableLL = dctx->LLTable;
+    U32* DTableML = dctx->MLTable;
+    U32* DTableOffb = dctx->OffTable;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+    /* Build Decoding Tables */
+    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+                                      DTableLL, DTableML, DTableOffb,
+                                      ip, iend-ip);
+    if (ZSTD_isError(errorCode)) return errorCode;
+    ip += errorCode;
+
+    /* Regen sequences */
+    {
+        seq_t sequence;
+        seqState_t seqState;
+
+        memset(&sequence, 0, sizeof(sequence));
+        sequence.offset = 4;
+        seqState.dumps = dumps;
+        seqState.dumpsEnd = dumps + dumpsLength;
+        seqState.prevOffset = 4;
+        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
+        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; )
+        {
+            size_t oneSeqSize;
+            nbSeq--;
+            ZSTD_decodeSequence(&sequence, &seqState);
+            oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* check if reached exact end */
+        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* DStream should be entirely and exactly consumed; otherwise data is corrupted */
+
+        /* last literal segment */
+        {
+            size_t lastLLSize = litEnd - litPtr;
+            if (litPtr > litEnd) return ERROR(corruption_detected);
+            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+            if (op != litPtr) memcpy(op, litPtr, lastLLSize);
+            op += lastLLSize;
+        }
+    }
+
+    return op-ostart;
+}
+
+
+static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd)   /* not contiguous */
+    {
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+        dctx->base = dst;
+        dctx->previousDstEnd = dst;
+    }
+}
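+
+/* Sketch of the window bookkeeping above : `base` marks the start of the
+   current output segment, `vBase` is a virtual start chosen so that
+   (ptr - vBase) keeps growing across non-contiguous segments, and `dictEnd`
+   marks the end of the previous segment, letting ZSTD_execSequence() resolve
+   matches that reach back into it. */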
+
+
+static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
+                            void* dst, size_t maxDstSize,
+                      const void* src, size_t srcSize)
+{
+    /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+
+    /* Decode literals sub-block */
+    size_t litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
+    if (ZSTD_isError(litCSize)) return litCSize;
+    ip += litCSize;
+    srcSize -= litCSize;
+
+    return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize);
+}
+
+
+static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
+                                 void* dst, size_t maxDstSize,
+                                 const void* src, size_t srcSize,
+                                 const void* dict, size_t dictSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t remainingSize = srcSize;
+    blockProperties_t blockProperties;
+
+    /* init */
+    ZSTD_resetDCtx(ctx);
+    if (dict)
+    {
+        ZSTD_decompress_insertDictionary(ctx, dict, dictSize);
+        ctx->dictEnd = ctx->previousDstEnd;
+        ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
+        ctx->base = dst;
+    }
+    else
+    {
+        ctx->vBase = ctx->base = ctx->dictEnd = dst;
+    }
+
+    /* Frame Header */
+    {
+        size_t frameHeaderSize;
+        if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+        frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
+        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+        if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+        frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize);
+        if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t decodedSize=0;
+        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet supported */
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            break;
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        if (ZSTD_isError(decodedSize)) return decodedSize;
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return op-ostart;
+}
+
+static size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
+    if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
+    ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTD_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTD_blockHeaderSize;
+        remainingSize -= ZSTD_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+/* ******************************
+*  Streaming Decompression API
+********************************/
+static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
+{
+    return dctx->expected;
+}
+
+static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
+    ZSTD_checkContinuity(ctx, dst);
+
+    /* Decompress : frame header; part 1 */
+    switch (ctx->stage)
+    {
+    case ZSTDds_getFrameHeaderSize :
+        /* get frame header size */
+        if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
+        ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
+        if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize;
+        memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min);
+        if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC);   /* impossible */
+        ctx->expected = 0;   /* not necessary to copy more */
+        /* fallthrough */
+    case ZSTDds_decodeFrameHeader:
+        /* get frame header */
+        {   size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize);
+            if (ZSTD_isError(result)) return result;
+            ctx->expected = ZSTD_blockHeaderSize;
+            ctx->stage = ZSTDds_decodeBlockHeader;
+            return 0;
+        }
+    case ZSTDds_decodeBlockHeader:
+        /* Decode block header */
+        {   blockProperties_t bp;
+            size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
+            if (ZSTD_isError(blockSize)) return blockSize;
+            if (bp.blockType == bt_end)
+            {
+                ctx->expected = 0;
+                ctx->stage = ZSTDds_getFrameHeaderSize;
+            }
+            else
+            {
+                ctx->expected = blockSize;
+                ctx->bType = bp.blockType;
+                ctx->stage = ZSTDds_decompressBlock;
+            }
+            return 0;
+        }
+    case ZSTDds_decompressBlock:
+        {
+            /* Decompress : block content */
+            size_t rSize;
+            switch(ctx->bType)
+            {
+            case bt_compressed:
+                rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize);
+                break;
+            case bt_raw :
+                rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize);
+                break;
+            case bt_rle :
+                return ERROR(GENERIC);   /* not yet handled */
+                break;
+            case bt_end :   /* should never happen (filtered at phase 1) */
+                rSize = 0;
+                break;
+            default:
+                return ERROR(GENERIC);
+            }
+            ctx->stage = ZSTDds_decodeBlockHeader;
+            ctx->expected = ZSTD_blockHeaderSize;
+            ctx->previousDstEnd = (char*)dst + rSize;
+            return rSize;
+        }
+    default:
+        return ERROR(GENERIC);   /* impossible */
+    }
+}
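+
+/* Illustrative driver loop (sketch only, not part of the original source ;
+   ip/op/oend are assumed cursors) : the contract is to feed
+   ZSTD_decompressContinue() exactly ZSTD_nextSrcSizeToDecompress() bytes per
+   call, as ZBUFF_decompressContinue() does further below.
+
+   size_t needed = ZSTD_nextSrcSizeToDecompress(dctx);
+   while (needed) {
+       size_t const r = ZSTD_decompressContinue(dctx, op, oend-op, ip, needed);
+       if (ZSTD_isError(r)) return r;   // corrupted or truncated input
+       ip += needed; op += r;           // r==0 after header / block-header steps
+       needed = ZSTD_nextSrcSizeToDecompress(dctx);
+   }
+*/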
+
+
+static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize)
+{
+    ctx->dictEnd = ctx->previousDstEnd;
+    ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
+    ctx->base = dict;
+    ctx->previousDstEnd = (const char*)dict + dictSize;
+}
+
+
+
+/*
+    Buffered version of Zstd compression library
+    Copyright (C) 2015, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined in this file should be considered experimental.
+ * They are not labelled stable, as their prototypes may change in the future.
+ * You can use them for tests, to provide feedback, or if you can endure the risk of future changes.
+ */
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stdlib.h>
+
+
+/** ************************************************
+*  Streaming decompression
+*
+*  A ZBUFF_DCtx object is required to track streaming operation.
+*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
+*  Use ZBUFF_decompressInit() to start a new decompression operation.
+*  ZBUFF_DCtx objects can be reused multiple times.
+*
+*  Use ZBUFF_decompressContinue() repetitively to consume your input.
+*  *srcSizePtr and *maxDstSizePtr can be any size.
+*  The function reports how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
+*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
+*  @return : a hint of the preferred nb of bytes to use as input for the next function call (it's only a hint, to improve latency),
+*            or 0 when a frame is completely decoded
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory)
+*  output : the internal unit is a 128 KB block ; sizing the output buffer to at least one block ensures a fully decoded block can always be written.
+*  input : just follow the indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* **************************************************/
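+
+/* Minimal usage sketch (illustration only ; `in`, `inSize` and the error
+   handling are assumptions, the ZBUFF_* calls are the functions defined below) :
+
+   ZBUFF_DCtx* const zbd = ZBUFF_createDCtx();
+   char out[128*1024];                         // >= one full 128 KB block
+   ZBUFF_decompressInit(zbd);
+   while (inSize) {
+       size_t srcSize = inSize, dstSize = sizeof(out);
+       size_t const hint = ZBUFF_decompressContinue(zbd, out, &dstSize, in, &srcSize);
+       if (ZBUFF_isError(hint)) break;         // decoding error
+       // dstSize bytes are now valid in out[] ; srcSize bytes were consumed
+       in += srcSize; inSize -= srcSize;
+       if (hint == 0) break;                   // frame fully decoded
+   }
+   ZBUFF_freeDCtx(zbd);
+*/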
+
+typedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader,
+               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage;
+
+/* *** Resource management *** */
+
+#define ZSTD_frameHeaderSize_max 5   /* too magical, should come from reference */
+struct ZBUFFv04_DCtx_s {
+    ZSTD_DCtx* zc;
+    ZSTD_parameters params;
+    char* inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    char* outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t hPos;
+    const char* dict;
+    size_t dictSize;
+    ZBUFF_dStage stage;
+    unsigned char headerBuffer[ZSTD_frameHeaderSize_max];
+};   /* typedef'd to ZBUFF_DCtx within "zstd_buffered.h" */
+
+typedef ZBUFFv04_DCtx ZBUFF_DCtx;
+
+
+static ZBUFF_DCtx* ZBUFF_createDCtx(void)
+{
+    ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx));
+    if (zbc==NULL) return NULL;
+    memset(zbc, 0, sizeof(*zbc));
+    zbc->zc = ZSTD_createDCtx();
+    zbc->stage = ZBUFFds_init;
+    return zbc;
+}
+
+static size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc)
+{
+    if (zbc==NULL) return 0;   /* support free on null */
+    ZSTD_freeDCtx(zbc->zc);
+    free(zbc->inBuff);
+    free(zbc->outBuff);
+    free(zbc);
+    return 0;
+}
+
+
+/* *** Initialization *** */
+
+static size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc)
+{
+    zbc->stage = ZBUFFds_readHeader;
+    zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0;
+    return ZSTD_resetDCtx(zbc->zc);
+}
+
+
+static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize)
+{
+    zbc->dict = (const char*)src;
+    zbc->dictSize = srcSize;
+    return 0;
+}
+
+static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    size_t length = MIN(maxDstSize, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+/* *** Decompression *** */
+
+static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
+{
+    const char* const istart = (const char*)src;
+    const char* ip = istart;
+    const char* const iend = istart + *srcSizePtr;
+    char* const ostart = (char*)dst;
+    char* op = ostart;
+    char* const oend = ostart + *maxDstSizePtr;
+    U32 notDone = 1;
+
+    DEBUGLOG(5, "ZBUFF_decompressContinue");
+    while (notDone)
+    {
+        switch(zbc->stage)
+        {
+
+        case ZBUFFds_init :
+            DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
+            return ERROR(init_missing);
+
+        case ZBUFFds_readHeader :
+            /* read header from src */
+            {   size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr);
+                if (ZSTD_isError(headerSize)) return headerSize;
+                if (headerSize) {
+                    /* not enough input to decode header : tell how many bytes would be necessary */
+                    memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
+                    zbc->hPos += *srcSizePtr;
+                    *maxDstSizePtr = 0;
+                    zbc->stage = ZBUFFds_loadHeader;
+                    return headerSize - zbc->hPos;
+                }
+                zbc->stage = ZBUFFds_decodeHeader;
+                break;
+            }
+
+        case ZBUFFds_loadHeader:
+            /* complete header from src */
+            {   size_t headerSize = ZBUFF_limitCopy(
+                    zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos,
+                    src, *srcSizePtr);
+                zbc->hPos += headerSize;
+                ip += headerSize;
+                headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
+                if (ZSTD_isError(headerSize)) return headerSize;
+                if (headerSize) {
+                    /* not enough input to decode header : report how many more bytes are needed */
+                    *maxDstSizePtr = 0;
+                    return headerSize - zbc->hPos;
+            }   }
+            /* intentional fallthrough */
+
+        case ZBUFFds_decodeHeader:
+                /* apply header to create / resize buffers */
+                {   size_t const neededOutSize = (size_t)1 << zbc->params.windowLog;
+                    size_t const neededInSize = BLOCKSIZE;   /* a block is never > BLOCKSIZE */
+                    if (zbc->inBuffSize < neededInSize) {
+                        free(zbc->inBuff);
+                        zbc->inBuffSize = neededInSize;
+                        zbc->inBuff = (char*)malloc(neededInSize);
+                        if (zbc->inBuff == NULL) return ERROR(memory_allocation);
+                    }
+                    if (zbc->outBuffSize < neededOutSize) {
+                        free(zbc->outBuff);
+                        zbc->outBuffSize = neededOutSize;
+                        zbc->outBuff = (char*)malloc(neededOutSize);
+                        if (zbc->outBuff == NULL) return ERROR(memory_allocation);
+                }   }
+                if (zbc->dictSize)
+                    ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize);
+                if (zbc->hPos) {
+                    /* some data already loaded into headerBuffer : transfer into inBuff */
+                    memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
+                    zbc->inPos = zbc->hPos;
+                    zbc->hPos = 0;
+                    zbc->stage = ZBUFFds_load;
+                    break;
+                }
+                zbc->stage = ZBUFFds_read;
+        /* fall-through */
+        case ZBUFFds_read:
+            {
+                size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
+                if (neededInSize==0)   /* end of frame */
+                {
+                    zbc->stage = ZBUFFds_init;
+                    notDone = 0;
+                    break;
+                }
+                if ((size_t)(iend-ip) >= neededInSize)
+                {
+                    /* directly decode from src */
+                    size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
+                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+                        ip, neededInSize);
+                    if (ZSTD_isError(decodedSize)) return decodedSize;
+                    ip += neededInSize;
+                    if (!decodedSize) break;   /* this was just a header */
+                    zbc->outEnd = zbc->outStart +  decodedSize;
+                    zbc->stage = ZBUFFds_flush;
+                    break;
+                }
+                if (ip==iend) { notDone = 0; break; }   /* no more input */
+                zbc->stage = ZBUFFds_load;
+            }
+            /* fall-through */
+        case ZBUFFds_load:
+            {
+                size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
+                size_t toLoad = neededInSize - zbc->inPos;   /* should always be <= remaining space within inBuff */
+                size_t loadedSize;
+                if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected);   /* should never happen */
+                loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
+                ip += loadedSize;
+                zbc->inPos += loadedSize;
+                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
+                {
+                    size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
+                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+                        zbc->inBuff, neededInSize);
+                    if (ZSTD_isError(decodedSize)) return decodedSize;
+                    zbc->inPos = 0;   /* input is consumed */
+                    if (!decodedSize) { zbc->stage = ZBUFFds_read; break; }   /* this was just a header */
+                    zbc->outEnd = zbc->outStart +  decodedSize;
+                    zbc->stage = ZBUFFds_flush;
+                    /* ZBUFFds_flush follows */
+                }
+            }
+            /* fall-through */
+        case ZBUFFds_flush:
+            {
+                size_t toFlushSize = zbc->outEnd - zbc->outStart;
+                size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
+                op += flushedSize;
+                zbc->outStart += flushedSize;
+                if (flushedSize == toFlushSize)
+                {
+                    zbc->stage = ZBUFFds_read;
+                    if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
+                        zbc->outStart = zbc->outEnd = 0;
+                    break;
+                }
+                /* cannot flush everything */
+                notDone = 0;
+                break;
+            }
+        default: return ERROR(GENERIC);   /* impossible */
+        }
+    }
+
+    *srcSizePtr = ip-istart;
+    *maxDstSizePtr = op-ostart;
+
+    {
+        size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc);
+        if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3;   /* get the next block header while at it */
+        nextSrcSizeHint -= zbc->inPos;   /* already loaded */
+        return nextSrcSizeHint;
+    }
+}
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }
+const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+size_t ZBUFFv04_recommendedDInSize(void)  { return BLOCKSIZE + 3; }
+size_t ZBUFFv04_recommendedDOutSize(void) { return BLOCKSIZE; }
+
+
+
+/*- ========================================================================= -*/
+
+/* final wrapping stage */
+
+size_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
+}
+
+size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)
+    size_t regenSize;
+    ZSTD_DCtx* dctx = ZSTD_createDCtx();
+    if (dctx==NULL) return ERROR(memory_allocation);
+    regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
+    ZSTD_freeDCtx(dctx);
+    return regenSize;
+#else
+    ZSTD_DCtx dctx;
+    return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
+#endif
+}
+
+size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    return ZSTD_findFrameCompressedSize(src, srcSize);
+}
+
+size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }
+
+size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)
+{
+    return ZSTD_nextSrcSizeToDecompress(dctx);
+}
+
+size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize);
+}
+
+
+
+ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }
+size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
+
+size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }
+size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)
+{ return ZBUFF_decompressWithDictionary(dctx, src, srcSize); }
+
+size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
+{
+    DEBUGLOG(5, "ZBUFFv04_decompressContinue");
+    return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);
+}
+
+ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }
+size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); }
+
+size_t ZSTDv04_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
+{
+    return ZSTD_getFrameParams(params, src, srcSize);
+}
diff --git a/vendor/github.com/DataDog/zstd/zstd_v04.h b/vendor/github.com/DataDog/zstd/zstd_v04.h
new file mode 100644
index 0000000..6391631
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v04.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_V04_H_91868324769238
+#define ZSTD_V04_H_91868324769238
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* *************************************
+*  Includes
+***************************************/
+#include <stddef.h>   /* size_t */
+
+
+/* *************************************
+*  Simple one-step function
+***************************************/
+/**
+ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
+    compressedSize : is the exact source size
+    maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
+                      It must be equal to or larger than originalSize, otherwise decompression will fail.
+    return : the number of bytes decompressed into destination buffer (originalSize)
+             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
+*/
+size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
+                     const void* src, size_t compressedSize);
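+
+/* Illustrative usage sketch (added for documentation; not part of the original zstd sources).
+   It shows a one-shot call of the simple API. `dst`, `dstCapacity`, `src` and `srcSize` are
+   assumed to be provided by the caller, with `dstCapacity` at least as large as the original size.
+
+    size_t const rSize = ZSTDv04_decompress(dst, dstCapacity, src, srcSize);
+    if (ZSTDv04_isError(rSize)) {
+        // handle the error
+    } else {
+        // rSize bytes were regenerated into dst
+    }
+*/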
+
+/**
+ZSTDv04_findFrameCompressedSize() : get the compressed size of a ZSTD frame compliant with v0.4.x format
+    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame
+             or an errorCode if it fails (which can be tested using ZSTDv04_isError())
+*/
+size_t ZSTDv04_findFrameCompressedSize(const void* src, size_t compressedSize);
+
+/**
+ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
+*/
+unsigned ZSTDv04_isError(size_t code);
+
+
+/* *************************************
+*  Advanced functions
+***************************************/
+typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
+ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
+size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);
+
+size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
+                              void* dst, size_t maxOriginalSize,
+                        const void* src, size_t compressedSize);
+
+
+/* *************************************
+*  Direct Streaming
+***************************************/
+size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);
+
+size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
+size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
+/**
+  Use the above functions alternately.
+  ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
+  ZSTD_decompressContinue() needs previous data blocks during decompression; they should be located contiguously prior to the current block.
+  Result is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
+*/
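+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   a typical direct-streaming loop built from the two calls above. `readInput()` is a
+   hypothetical callback that fills `inBuff` with exactly `toRead` compressed bytes;
+   `outBuff`/`outCapacity` are a caller-provided output buffer.
+
+    ZSTDv04_Dctx* const dctx = ZSTDv04_createDCtx();
+    size_t toRead;
+    ZSTDv04_resetDCtx(dctx);
+    while ((toRead = ZSTDv04_nextSrcSizeToDecompress(dctx)) != 0) {
+        readInput(inBuff, toRead);                    // must deliver exactly toRead bytes
+        {   size_t const decoded = ZSTDv04_decompressContinue(dctx,
+                                        outBuff, outCapacity, inBuff, toRead);
+            if (ZSTDv04_isError(decoded)) break;      // handle error
+            // decoded may be 0 : only a header was consumed
+    }   }
+    ZSTDv04_freeDCtx(dctx);
+*/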
+
+
+/* *************************************
+*  Buffered Streaming
+***************************************/
+typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
+ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
+size_t         ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);
+
+size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
+size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);
+
+size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);
+
+/** ************************************************
+*  Streaming decompression
+*
+*  A ZBUFF_DCtx object is required to track streaming operation.
+*  Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
+*  Use ZBUFF_decompressInit() to start a new decompression operation.
+*  ZBUFF_DCtx objects can be reused multiple times.
+*
+*  Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary().
+*  It must have the same content as the one used during the compression phase.
+*  Dictionary content must remain accessible during the decompression process.
+*
+*  Use ZBUFF_decompressContinue() repeatedly to consume your input.
+*  *srcSizePtr and *maxDstSizePtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst.
+*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
+*            or 0 when a frame is completely decoded
+*            or an error code, which can be tested using ZBUFF_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize
+*  output : ZBUFF_recommendedDOutSize == 128 KB; the block size is the internal unit, so this size guarantees a fully decoded block can always be written.
+*  input  : ZBUFF_recommendedDInSize == 128 KB + 3; just follow the indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* **************************************************/
+unsigned ZBUFFv04_isError(size_t errorCode);
+const char* ZBUFFv04_getErrorName(size_t errorCode);
+
+
+/** The below functions provide recommended buffer sizes for Compression or Decompression operations.
+*   These sizes are not compulsory, they just tend to offer better latency */
+size_t ZBUFFv04_recommendedDInSize(void);
+size_t ZBUFFv04_recommendedDOutSize(void);
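+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   a buffered-streaming loop using the functions above. `src`/`srcSize` hold compressed
+   input already in memory; `writeOutput()` is a hypothetical sink for decoded bytes,
+   and `outBuff`/`outCapacity` a caller-provided output buffer.
+
+    ZBUFFv04_DCtx* const zbd = ZBUFFv04_createDCtx();
+    const char* ip = (const char*)src;
+    const char* const iend = ip + srcSize;
+    ZBUFFv04_decompressInit(zbd);
+    while (ip < iend) {
+        size_t dstSize  = outCapacity;          // in : room in outBuff ; out : bytes written
+        size_t readSize = (size_t)(iend - ip);  // in : bytes available ; out : bytes consumed
+        size_t const hint = ZBUFFv04_decompressContinue(zbd, outBuff, &dstSize, ip, &readSize);
+        if (ZBUFFv04_isError(hint)) break;      // handle error
+        writeOutput(outBuff, dstSize);
+        ip += readSize;
+        if (hint == 0) break;                   // frame completely decoded
+    }
+    ZBUFFv04_freeDCtx(zbd);
+*/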
+
+
+/* *************************************
+*  Prefix - version detection
+***************************************/
+#define ZSTDv04_magicNumber 0xFD2FB524   /* v0.4 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_V04_H_91868324769238 */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.c b/vendor/github.com/DataDog/zstd/zstd_v05.c
new file mode 100644
index 0000000..a5e1b1f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v05.c
@@ -0,0 +1,4011 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include "zstd_v05.h"
+#include "error_private.h"
+
+
+/* ******************************************************************
+   mem.h
+   low-level memory access routines
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include <string.h>    /* memcpy */
+
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#if defined(__GNUC__)
+#  define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/*-**************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method works across compilers but violates the C standard.
+ *            It can generate buggy code on targets depending on alignment.
+ *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Only use when there is no other way to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write32(void* memPtr, U32 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+MEM_STATIC void MEM_write64(void* memPtr, U64 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian()) {
+        MEM_write16(memPtr, val);
+    } else {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
+    }
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+                     + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
+    }
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+/*
+    zstd - standard compression library
+    Header File for static linking only
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net
+*/
+#ifndef ZSTD_STATIC_H
+#define ZSTD_STATIC_H
+
+/* The prototypes defined within this file are considered experimental.
+ * They should not be used in the context of a DLL, as they may change in the future.
+ * Prefer static linking if you need them, to stay in control of breaking changes between versions.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*-*************************************
+*  Types
+***************************************/
+#define ZSTDv05_WINDOWLOG_ABSOLUTEMIN 11
+
+
+/*-*************************************
+*  Advanced functions
+***************************************/
+/*- Advanced Decompression functions -*/
+
+/*! ZSTDv05_decompress_usingPreparedDCtx() :
+*   Same as ZSTDv05_decompress_usingDict(), but using a reference context `preparedDCtx`, where the dictionary has already been loaded.
+*   It avoids reloading the dictionary each time.
+*   `preparedDCtx` must have been properly initialized using ZSTDv05_decompressBegin_usingDict().
+*   Requires 2 contexts : 1 for reference, which will not be modified, and 1 to run the decompression operation */
+size_t ZSTDv05_decompress_usingPreparedDCtx(
+                                             ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx,
+                                             void* dst, size_t dstCapacity,
+                                       const void* src, size_t srcSize);
+
+
+/* **************************************
+*  Streaming functions (direct mode)
+****************************************/
+size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx);
+
+/*
+  Streaming decompression, direct mode (bufferless)
+
+  A ZSTDv05_DCtx object is required to track streaming operations.
+  Use ZSTDv05_createDCtx() / ZSTDv05_freeDCtx() to manage it.
+  A ZSTDv05_DCtx object can be re-used multiple times.
+
+  First typical operation is to retrieve frame parameters, using ZSTDv05_getFrameParams().
+  This operation is independent, and just needs enough input data to properly decode the frame header.
+  The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
+  Result : 0 when successful, it means the ZSTDv05_parameters structure has been filled.
+           >0 : means there is not enough data in src. Provides the expected size to successfully decode the header.
+           or an errorCode, which can be tested using ZSTDv05_isError()
+
+  Start decompression, with ZSTDv05_decompressBegin() or ZSTDv05_decompressBegin_usingDict()
+  Alternatively, you can copy a prepared context, using ZSTDv05_copyDCtx()
+
+  Then use ZSTDv05_nextSrcSizeToDecompress() and ZSTDv05_decompressContinue() alternately.
+  ZSTDv05_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv05_decompressContinue().
+  ZSTDv05_decompressContinue() requires this exact amount of bytes, or it will fail.
+  ZSTDv05_decompressContinue() needs previous data blocks during decompression, up to (1 << windowLog).
+  They should preferably be located contiguously, prior to the current block. Alternatively, a ring buffer is also possible.
+
+  @result of ZSTDv05_decompressContinue() is the number of bytes regenerated within 'dst'.
+  It can be zero, which is not an error; it just means ZSTDv05_decompressContinue() has decoded some header.
+
+  A frame is fully decoded when ZSTDv05_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+*/
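+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   the bufferless sequence described above. `readInput()` is a hypothetical callback
+   delivering exactly `toRead` compressed bytes; `op`/`oend` delimit a caller-provided
+   output buffer.
+
+    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
+    size_t toRead;
+    ZSTDv05_decompressBegin(dctx);
+    while ((toRead = ZSTDv05_nextSrcSizeToDecompress(dctx)) != 0) {
+        readInput(inBuff, toRead);                 // exactly toRead bytes required
+        {   size_t const decoded = ZSTDv05_decompressContinue(dctx,
+                                        op, (size_t)(oend - op), inBuff, toRead);
+            if (ZSTDv05_isError(decoded)) break;   // handle error
+            op += decoded;                         // may be 0 after a header-only step
+    }   }
+    ZSTDv05_freeDCtx(dctx);
+*/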
+
+
+/* **************************************
+*  Block functions
+****************************************/
+/*! Block functions produce and decode raw zstd blocks, without frame metadata.
+    The user will have to keep track of the information required to regenerate the data, such as block sizes.
+
+    A few rules to respect :
+    - Uncompressed block size must be <= 128 KB
+    - Compressing or decompressing requires a context structure
+      + Use ZSTDv05_createCCtx() and ZSTDv05_createDCtx()
+    - It is necessary to init context before starting
+      + compression : ZSTDv05_compressBegin()
+      + decompression : ZSTDv05_decompressBegin()
+      + variants _usingDict() are also allowed
+      + copyCCtx() and copyDCtx() work too
+    - When a block is considered not compressible enough, the result of ZSTDv05_compressBlock() will be zero,
+      in which case nothing is produced into `dst`.
+      + User must test for such outcome and deal directly with uncompressed data
+      + ZSTDv05_decompressBlock() doesn't accept uncompressed data as input !!
+*/
+
+size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
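+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   decoding a sequence of compressed blocks whose sizes are tracked externally, as the
+   rules above require. `blockPtr[]`/`blockSizes[]`/`nbBlocks` and the output buffer
+   delimited by `op`/`oend` are assumed to be caller-provided.
+
+    ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
+    size_t b;
+    ZSTDv05_decompressBegin(dctx);
+    for (b = 0; b < nbBlocks; b++) {
+        size_t const dSize = ZSTDv05_decompressBlock(dctx, op, (size_t)(oend - op),
+                                                     blockPtr[b], blockSizes[b]);
+        if (ZSTDv05_isError(dSize)) break;   // handle error
+        op += dSize;
+    }
+    ZSTDv05_freeDCtx(dctx);
+*/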
+
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTDv05_STATIC_H */
+
+
+/*
+    zstd_internal - common functions to include
+    Header File for include
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+*/
+#ifndef ZSTD_CCOMMON_H_MODULE
+#define ZSTD_CCOMMON_H_MODULE
+
+
+
+/*-*************************************
+*  Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/*-*************************************
+*  Common constants
+***************************************/
+#define ZSTDv05_DICT_MAGIC  0xEC30A435
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BLOCKSIZE (128 KB)                 /* define, for static allocation */
+
+static const size_t ZSTDv05_blockHeaderSize = 3;
+static const size_t ZSTDv05_frameHeaderSize_min = 5;
+#define ZSTDv05_frameHeaderSize_max 5         /* define, for static allocation */
+
+#define BITv057 128
+#define BITv056  64
+#define BITv055  32
+#define BITv054  16
+#define BITv051   2
+#define BITv050   1
+
+#define IS_HUFv05 0
+#define IS_PCH 1
+#define IS_RAW 2
+#define IS_RLE 3
+
+#define MINMATCH 4
+#define REPCODE_STARTVALUE 1
+
+#define Litbits  8
+#define MLbits   7
+#define LLbits   6
+#define Offbits  5
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML  ((1<<MLbits) - 1)
+#define MaxLL  ((1<<LLbits) - 1)
+#define MaxOff ((1<<Offbits)- 1)
+#define MLFSEv05Log   10
+#define LLFSEv05Log   10
+#define OffFSEv05Log   9
+#define MaxSeq MAX(MaxLL, MaxML)
+
+#define FSEv05_ENCODING_RAW     0
+#define FSEv05_ENCODING_RLE     1
+#define FSEv05_ENCODING_STATIC  2
+#define FSEv05_ENCODING_DYNAMIC 3
+
+
+#define HufLog 12
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
+
+#define WILDCOPY_OVERLENGTH 8
+
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+
+/*-*******************************************
+*  Shared functions to include for inlining
+*********************************************/
+static void ZSTDv05_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+
+#define COPY8(d,s) { ZSTDv05_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTDv05_wildcopy() :
+*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
+
+
+/*-*******************************************
+*  Private interfaces
+*********************************************/
+typedef struct {
+    void* buffer;
+    U32*  offsetStart;
+    U32*  offset;
+    BYTE* offCodeStart;
+    BYTE* offCode;
+    BYTE* litStart;
+    BYTE* lit;
+    BYTE* litLengthStart;
+    BYTE* litLength;
+    BYTE* matchLengthStart;
+    BYTE* matchLength;
+    BYTE* dumpsStart;
+    BYTE* dumps;
+    /* opt */
+    U32* matchLengthFreq;
+    U32* litLengthFreq;
+    U32* litFreq;
+    U32* offCodeFreq;
+    U32  matchLengthSum;
+    U32  litLengthSum;
+    U32  litSum;
+    U32  offCodeSum;
+} seqStore_t;
+
+
+
+#endif   /* ZSTDv05_CCOMMON_H_MODULE */
+/* ******************************************************************
+   FSEv05 : Finite State Entropy coder
+   header file
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSEv05_H
+#define FSEv05_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+*  Includes
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+
+
+/*-****************************************
+*  FSEv05 simple functions
+******************************************/
+size_t FSEv05_decompress(void* dst,  size_t maxDstSize,
+                const void* cSrc, size_t cSrcSize);
+/*!
+FSEv05_decompress():
+    Decompress FSEv05 data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'maxDstSize'.
+    return : size of regenerated data (<= maxDstSize)
+             or an error code, which can be tested using FSEv05_isError()
+
+    ** Important ** : FSEv05_decompress() decompresses neither non-compressed (raw) nor RLE data !!!
+    Why ? Making this distinction requires a header.
+    Header management is intentionally delegated to the user layer, which can better handle special cases.
+*/
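+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   one-shot use of the simple API. `cSrc`/`cSrcSize` hold one FSEv05-compressed block,
+   `dst`/`maxDstSize` a pre-allocated destination buffer.
+
+    size_t const rSize = FSEv05_decompress(dst, maxDstSize, cSrc, cSrcSize);
+    if (FSEv05_isError(rSize)) {
+        // handle error, e.g. log FSEv05_getErrorName(rSize)
+    } else {
+        // rSize bytes were regenerated into dst
+    }
+*/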
+
+
+/* *****************************************
+*  Tool functions
+******************************************/
+/* Error Management */
+unsigned    FSEv05_isError(size_t code);        /* tells if a return value is an error code */
+const char* FSEv05_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
+
+
+
+
+/* *****************************************
+*  FSEv05 detailed API
+******************************************/
+/* *** DECOMPRESSION *** */
+
+/*!
+FSEv05_readNCount():
+   Read compactly saved 'normalizedCounter' from 'rBuffer'.
+   return : size read from 'rBuffer'
+            or an errorCode, which can be tested using FSEv05_isError()
+            maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*!
+Constructor and Destructor of type FSEv05_DTable
+    Note that its size depends on 'tableLog' */
+typedef unsigned FSEv05_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+FSEv05_DTable* FSEv05_createDTable(unsigned tableLog);
+void        FSEv05_freeDTable(FSEv05_DTable* dt);
+
+/*!
+FSEv05_buildDTable():
+   Builds 'dt', which must be already allocated, using FSEv05_createDTable()
+   @return : 0,
+             or an errorCode, which can be tested using FSEv05_isError() */
+size_t FSEv05_buildDTable (FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*!
+FSEv05_decompress_usingDTable():
+   Decompress compressed source @cSrc of size @cSrcSize using `dt`
+   into `dst` which must be already allocated.
+   @return : size of regenerated data (necessarily <= @dstCapacity)
+             or an errorCode, which can be tested using FSEv05_isError() */
+size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt);
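+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   the typical three-step flow of the detailed API, assuming `cSrc` starts with a
+   compactly saved normalized counter immediately followed by the FSEv05 bitstream,
+   and `dst`/`dstCapacity` are a caller-provided output buffer.
+
+    short norm[256];                     // 256 == FSEv05_MAX_SYMBOL_VALUE + 1
+    unsigned maxSymbol = 255, tableLog;
+    size_t const hSize = FSEv05_readNCount(norm, &maxSymbol, &tableLog, cSrc, cSrcSize);
+    if (!FSEv05_isError(hSize)) {
+        FSEv05_DTable* const dt = FSEv05_createDTable(tableLog);
+        if (FSEv05_buildDTable(dt, norm, maxSymbol, tableLog) == 0) {
+            size_t const rSize = FSEv05_decompress_usingDTable(dst, dstCapacity,
+                                     (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+            // rSize : regenerated size, or an error code (test with FSEv05_isError())
+        }
+        FSEv05_freeDTable(dt);
+    }
+*/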
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSEv05_H */
+/* ******************************************************************
+   bitstream
+   Part of FSEv05 library
+   header file (to include)
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITv05STREAM_H_MODULE
+#define BITv05STREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which benefit greatly from being inlined.
+*  Since link-time optimization is not available for all compilers,
+*  these functions are defined in a .h so they can be included.
+*/
+
+
+
+/*-********************************************
+*  bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BITv05_DStream_t;
+
+typedef enum { BITv05_DStream_unfinished = 0,
+               BITv05_DStream_endOfBuffer = 1,
+               BITv05_DStream_completed = 2,
+               BITv05_DStream_overflow = 3 } BITv05_DStream_status;  /* result of BITv05_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD);
+MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* bitD);
+
+
+/*-****************************************
+*  unsafe API
+******************************************/
+MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+*  Helper functions
+****************************************************************/
+MEM_STATIC unsigned BITv05_highbit32 (U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    unsigned r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
+
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*!BITv05_initDStream
+*  Initialize a BITv05_DStream_t.
+*  @bitD : a pointer to an already allocated BITv05_DStream_t structure
+*  @srcBuffer must point at the beginning of a bitStream
+*  @srcSize must be the exact size of the bitStream
+*  @result : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(size_t)) {  /* normal case */
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(size_t);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
+    } else {
+        U32 contain32;
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) <<  8; /* fall-through */
+            default: break;
+        }
+        contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
+        if (contain32 == 0) return ERROR(GENERIC);   /* endMark not present */
+        bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
+        bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BITv05_lookBitsFast :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
+{
+    const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BITv05_lookBits(bitD, nbBits);
+    BITv05_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*!BITv05_readBitsFast :
+*  unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
+{
+    size_t value = BITv05_lookBitsFast(bitD, nbBits);
+    BITv05_skipBits(bitD, nbBits);
+    return value;
+}
+
+MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
+        return BITv05_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BITv05_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start) {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv05_DStream_endOfBuffer;
+        return BITv05_DStream_completed;
+    }
+    {
+        U32 nbBytes = bitD->bitsConsumed >> 3;
+        BITv05_DStream_status result = BITv05_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start) {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BITv05_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BITv05_endOfDStream
+*   @return Tells if DStream has reached its exact end
+*/
+MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITv05STREAM_H_MODULE */
+/* ******************************************************************
+   FSEv05 : Finite State Entropy coder
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSEv05_STATIC_H
+#define FSEv05_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* It is possible to statically allocate an FSEv05 DTable as a table of unsigned using the macro below */
+#define FSEv05_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
+
+
+/* *****************************************
+*  FSEv05 advanced API
+*******************************************/
+size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits);
+/* build a fake FSEv05_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, unsigned char symbolValue);
+/* build a fake FSEv05_DTable, designed to always generate the same symbolValue */
+
+
+
+/* *****************************************
+*  FSEv05 symbol decompression API
+*******************************************/
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSEv05_DState_t;
+
+
+static void     FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt);
+
+static unsigned char FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
+
+static unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr);
+
+
+
+/* *****************************************
+*  FSEv05 unsafe API
+*******************************************/
+static unsigned char FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
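+
+/* Illustrative sketch (added for documentation; not part of the original zstd sources) :
+   a simplified single-state decoding loop (real FSEv05 decoders interleave two states
+   for speed, then flush remaining symbols). `dt` is assumed built with FSEv05_buildDTable();
+   `cSrc`/`cSrcSize` delimit the bitstream and `op`/`oend` the caller's output buffer.
+
+    BITv05_DStream_t bitD;
+    FSEv05_DState_t  state;
+    if (!FSEv05_isError(BITv05_initDStream(&bitD, cSrc, cSrcSize))) {
+        FSEv05_initDState(&state, &bitD, dt);
+        while ((BITv05_reloadDStream(&bitD) <= BITv05_DStream_endOfBuffer) && (op < oend)) {
+            *op++ = FSEv05_decodeSymbol(&state, &bitD);
+        }
+        // the stream ended cleanly when BITv05_endOfDStream(&bitD) and
+        // FSEv05_endOfDState(&state) both return non-zero
+    }
+*/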
+
+
+/* *****************************************
+*  Implementation of inlined functions
+*******************************************/
+/* decompression */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSEv05_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSEv05_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSEv05_DTableHeader* const DTableH = (const FSEv05_DTableHeader*)ptr;
+    DStatePtr->state = BITv05_readBits(bitD, DTableH->tableLog);
+    BITv05_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSEv05_peakSymbol(FSEv05_DState_t* DStatePtr)
+{
+    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    return DInfo.symbol;
+}
+
+MEM_STATIC BYTE FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
+{
+    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32  nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BITv05_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC BYTE FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
+{
+    const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    const U32 nbBits = DInfo.nbBits;
+    BYTE symbol = DInfo.symbol;
+    size_t lowBits = BITv05_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr)
+{
+    return DStatePtr->state == 0;
+}
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSEv05_STATIC_H */
+/* ******************************************************************
+   FSEv05 : Finite State Entropy coder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+#ifndef FSEv05_COMMONDEFS_ONLY
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSEv05_MAX_MEMORY_USAGE 14
+#define FSEv05_DEFAULT_MEMORY_USAGE 13
+
+/*!FSEv05_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSEv05_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSEv05_FUNCTION_TYPE BYTE
+#define FSEv05_FUNCTION_EXTENSION
+#define FSEv05_DECODE_TYPE FSEv05_decode_t
+
+
+#endif   /* !FSEv05_COMMONDEFS_ONLY */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSEv05_MAX_TABLELOG  (FSEv05_MAX_MEMORY_USAGE-2)
+#define FSEv05_MAX_TABLESIZE (1U<<FSEv05_MAX_TABLELOG)
+#define FSEv05_MAXTABLESIZE_MASK (FSEv05_MAX_TABLESIZE-1)
+#define FSEv05_DEFAULT_TABLELOG (FSEv05_DEFAULT_MEMORY_USAGE-2)
+#define FSEv05_MIN_TABLELOG 5
+
+#define FSEv05_TABLELOG_ABSOLUTE_MAX 15
+#if FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX
+#error "FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSEv05_STATIC_ASSERT(c) { enum { FSEv05_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
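+/* note : when c is false, !!(c) evaluates to 0 and the enum triggers a
+   compile-time division-by-zero error, emulating a static assertion in C89 */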
+
+
+/* **************************************************************
+*  Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  This section is designed to be included,
+  to generate type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSEv05_FUNCTION_EXTENSION
+#  error "FSEv05_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSEv05_FUNCTION_TYPE
+#  error "FSEv05_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSEv05_CAT(X,Y) X##Y
+#define FSEv05_FUNCTION_NAME(X,Y) FSEv05_CAT(X,Y)
+#define FSEv05_TYPE_NAME(X,Y) FSEv05_CAT(X,Y)
+
+
+/* Function templates */
+static U32 FSEv05_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
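+/* note : for the power-of-2 table sizes used here (tableLog >= FSEv05_MIN_TABLELOG),
+   this step (5/8 of tableSize, plus 3) is odd, hence coprime with tableSize,
+   so the spreading loop in FSEv05_buildDTable visits every cell exactly once */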
+
+
+
+FSEv05_DTable* FSEv05_createDTable (unsigned tableLog)
+{
+    if (tableLog > FSEv05_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv05_TABLELOG_ABSOLUTE_MAX;
+    return (FSEv05_DTable*)malloc( FSEv05_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSEv05_freeDTable (FSEv05_DTable* dt)
+{
+    free(dt);
+}
+
+size_t FSEv05_buildDTable(FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    FSEv05_DTableHeader DTableH;
+    void* const tdPtr = dt+1;   /* skip the first U32 : it holds the FSEv05_DTableHeader, and dt+1 stays 32-bit aligned */
+    FSEv05_DECODE_TYPE* const tableDecode = (FSEv05_DECODE_TYPE*) (tdPtr);
+    const U32 tableSize = 1 << tableLog;
+    const U32 tableMask = tableSize-1;
+    const U32 step = FSEv05_tableStep(tableSize);
+    U16 symbolNext[FSEv05_MAX_SYMBOL_VALUE+1];
+    U32 position = 0;
+    U32 highThreshold = tableSize-1;
+    const S16 largeLimit= (S16)(1 << (tableLog-1));
+    U32 noLarge = 1;
+    U32 s;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSEv05_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    DTableH.tableLog = (U16)tableLog;
+    for (s=0; s<=maxSymbolValue; s++) {
+        if (normalizedCounter[s]==-1) {
+            tableDecode[highThreshold--].symbol = (FSEv05_FUNCTION_TYPE)s;
+            symbolNext[s] = 1;
+        } else {
+            if (normalizedCounter[s] >= largeLimit) noLarge=0;
+            symbolNext[s] = normalizedCounter[s];
+    }   }
+
+    /* Spread symbols */
+    for (s=0; s<=maxSymbolValue; s++) {
+        int i;
+        for (i=0; i<normalizedCounter[s]; i++) {
+            tableDecode[position].symbol = (FSEv05_FUNCTION_TYPE)s;
+            position = (position + step) & tableMask;
+            while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+    }   }
+
+    if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+
+    /* Build Decoding table */
+    {
+        U32 i;
+        for (i=0; i<tableSize; i++) {
+            FSEv05_FUNCTION_TYPE symbol = (FSEv05_FUNCTION_TYPE)(tableDecode[i].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[i].nbBits = (BYTE) (tableLog - BITv05_highbit32 ((U32)nextState) );
+            tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
+    }   }
+
+    DTableH.fastMode = (U16)noLarge;
+    memcpy(dt, &DTableH, sizeof(DTableH));
+    return 0;
+}
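+
+/* note on the construction above : a symbol with normalized frequency f owns f
+   cells, and symbolNext walks those cells through [f, 2f). Consequently
+   nbBits = tableLog - highbit(nextState) and
+   newState = (nextState << nbBits) - tableSize together guarantee that
+   newState + readBits(nbBits) stays inside [0, tableSize) during decoding. */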
+
+
+#ifndef FSEv05_COMMONDEFS_ONLY
+/*-****************************************
+*  FSEv05 helper functions
+******************************************/
+unsigned FSEv05_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+*  FSEv05 NCount encoding-decoding
+****************************************************************/
+static short FSEv05_abs(short a) { return a<0 ? -a : a; }
+
+
+size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSEv05_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSEv05_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) && (charnum<=*maxSVPtr)) {
+        if (previous0) {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF) {
+                n0+=24;
+                if (ip < iend-5) {
+                    ip+=2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                } else {
+                    bitStream >>= 16;
+                    bitCount+=16;
+            }   }
+            while ((bitStream & 3) == 3) {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
+        {
+            const short max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            } else {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSEv05_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold) {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+    }   }
+    if (remaining != 1) return ERROR(GENERIC);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+    return ip-istart;
+}
+
+
+
+/*-*******************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSEv05_decode_t* const cell = (FSEv05_decode_t*)dPtr;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSEv05_decode_t* const dinfo = (FSEv05_decode_t*)dPtr;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSymbolValue = tableMask;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<=maxSymbolValue; s++) {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
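+
+/* note : the "raw" table built above is an identity mapping : every nbBits-bit
+   state decodes to its own value, so the decoder simply replays the input bits
+   as symbols, which regenerates uncompressed data through the same code path */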
+
+FORCE_INLINE size_t FSEv05_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSEv05_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BITv05_DStream_t bitD;
+    FSEv05_DState_t state1;
+    FSEv05_DState_t state2;
+    size_t errorCode;
+
+    /* Init */
+    errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
+    if (FSEv05_isError(errorCode)) return errorCode;
+
+    FSEv05_initDState(&state1, &bitD, dt);
+    FSEv05_initDState(&state2, &bitD, dt);
+
+#define FSEv05_GETSYMBOL(statePtr) fast ? FSEv05_decodeSymbolFast(statePtr, &bitD) : FSEv05_decodeSymbol(statePtr, &bitD)
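+
+/* note : two independent decode states (state1/state2) share a single bitstream;
+   interleaving their reads in the loop below lets the CPU overlap the
+   table-lookup latencies of consecutive symbols */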
+
+    /* 4 symbols per loop */
+    for ( ; (BITv05_reloadDStream(&bitD)==BITv05_DStream_unfinished) && (op<olimit) ; op+=4) {
+        op[0] = FSEv05_GETSYMBOL(&state1);
+
+        if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BITv05_reloadDStream(&bitD);
+
+        op[1] = FSEv05_GETSYMBOL(&state2);
+
+        if (FSEv05_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BITv05_reloadDStream(&bitD) > BITv05_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSEv05_GETSYMBOL(&state1);
+
+        if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BITv05_reloadDStream(&bitD);
+
+        op[3] = FSEv05_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BITv05_reloadDStream(&bitD) >= FSEv05_DStream_partiallyFilled; Ends at exactly BITv05_DStream_completed */
+    while (1) {
+        if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state1))) )
+            break;
+
+        *op++ = FSEv05_GETSYMBOL(&state1);
+
+        if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state2))) )
+            break;
+
+        *op++ = FSEv05_GETSYMBOL(&state2);
+    }
+
+    /* end ? */
+    if (BITv05_endOfDStream(&bitD) && FSEv05_endOfDState(&state1) && FSEv05_endOfDState(&state2))
+        return op-ostart;
+
+    if (op==omax) return ERROR(dstSize_tooSmall);   /* dst buffer is full, but cSrc unfinished */
+
+    return ERROR(corruption_detected);
+}
+
+
+size_t FSEv05_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSEv05_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSEv05_DTableHeader* DTableH = (const FSEv05_DTableHeader*)ptr;
+    const U32 fastMode = DTableH->fastMode;
+
+    /* select fast mode (static) */
+    if (fastMode) return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSEv05_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzers seem unable to understand that this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSEv05_MAX_SYMBOL_VALUE;
+    size_t errorCode;
+
+    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
+
+    /* normal FSEv05 decoding mode */
+    errorCode = FSEv05_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+    if (FSEv05_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    errorCode = FSEv05_buildDTable (dt, counting, maxSymbolValue, tableLog);
+    if (FSEv05_isError(errorCode)) return errorCode;
+
+    /* always return, even if it is an error code */
+    return FSEv05_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
+}
+
+
+
+#endif   /* FSEv05_COMMONDEFS_ONLY */
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   header file
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFF0_H
+#define HUFF0_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* ****************************************
+*  Huff0 simple functions
+******************************************/
+size_t HUFv05_decompress(void* dst,  size_t dstSize,
+                const void* cSrc, size_t cSrcSize);
+/*!
+HUFv05_decompress():
+    Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstSize'.
+    @dstSize : must be the **exact** size of original (uncompressed) data.
+    Note : in contrast with FSEv05, HUFv05_decompress can regenerate
+           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+           because it knows the size to regenerate.
+    @return : size of regenerated data (== dstSize)
+              or an error code, which can be tested using HUFv05_isError()
+*/
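+
+/*
+   Minimal usage sketch (illustrative only : `compressed`, `cSize` and
+   ORIGINAL_SIZE are hypothetical; the caller must already know the exact
+   decompressed size, since a Huff0 block does not store it) :
+
+     BYTE dst[ORIGINAL_SIZE];
+     size_t r = HUFv05_decompress(dst, ORIGINAL_SIZE, compressed, cSize);
+     if (HUFv05_isError(r)) printf("error : %s\n", HUFv05_getErrorName(r));
+*/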
+
+
+/* ****************************************
+*  Tool functions
+******************************************/
+/* Error Management */
+unsigned    HUFv05_isError(size_t code);        /* tells if a return value is an error code */
+const char* HUFv05_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* HUFF0_H */
+/* ******************************************************************
+   Huff0 : Huffman codec, part of New Generation Entropy library
+   header file, for static linking only
+   Copyright (C) 2013-2016, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUF0_STATIC_H
+#define HUF0_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* ****************************************
+*  Static allocation
+******************************************/
+/* static allocation of Huff0's DTable */
+#define HUFv05_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))
+#define HUFv05_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        unsigned short DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv05_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv05_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+        unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
+
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
+
+
+/* ****************************************
+*  Huff0 detailed API
+******************************************/
+/*!
+HUFv05_decompress() does the following :
+1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
+2. build the Huffman table from the saved statistics, using HUFv05_readDTableXn()
+3. decode 1 or 4 segments in parallel, using HUFv05_decompress{1,4}Xn_usingDTable()
+*/
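+
+/*
+   Illustrative sketch of the manual two-step path for the single-symbol
+   decoder (it mirrors what the HUFv05_decompress4X2() wrapper does) :
+
+     HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
+     size_t hSize = HUFv05_readDTableX2(DTable, cSrc, cSrcSize);
+     if (HUFv05_isError(hSize)) return hSize;
+     return HUFv05_decompress4X2_usingDTable(dst, dstSize,
+                 (const BYTE*)cSrc + hSize, cSrcSize - hSize, DTable);
+*/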
+size_t HUFv05_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
+size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
+
+size_t HUFv05_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv05_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+/* single stream variants */
+
+size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+
+size_t HUFv05_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUF0_STATIC_H */
+/* ******************************************************************
+   Huff0 : Huffman coder, part of New Generation Entropy library
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSEv05+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/* **************************************************************
+*  Includes
+****************************************************************/
+#include <stdlib.h>     /* malloc, free, qsort */
+#include <string.h>     /* memcpy, memset */
+#include <stdio.h>      /* printf (debug) */
+
+
+/* **************************************************************
+*  Constants
+****************************************************************/
+#define HUFv05_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUFv05_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUFv05_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUFv05_ABSOLUTEMAX_TABLELOG */
+#define HUFv05_DEFAULT_TABLELOG  HUFv05_MAX_TABLELOG   /* tableLog by default, when not specified */
+#define HUFv05_MAX_SYMBOL_VALUE 255
+#if (HUFv05_MAX_TABLELOG > HUFv05_ABSOLUTEMAX_TABLELOG)
+#  error "HUFv05_MAX_TABLELOG is too large !"
+#endif
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+unsigned HUFv05_isError(size_t code) { return ERR_isError(code); }
+const char* HUFv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
+#define HUFv05_STATIC_ASSERT(c) { enum { HUFv05_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/* *******************************************************
+*  Huff0 : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUFv05_DEltX2;   /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv05_DEltX4;  /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+/*! HUFv05_readStats
+    Read compact Huffman tree, saved by HUFv05_writeCTable
+    @huffWeight : destination buffer
+    @return : size read from `src`
+*/
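+/* Header layout, as parsed below : the first byte `iSize` selects the mode.
+   iSize >= 242 : RLE special case, all weights are 1 (count taken from a small table);
+   128..241     : raw mode, (iSize-127) weights stored as 4-bit nibbles;
+   iSize < 128  : normal mode, iSize bytes of FSEv05-compressed weights follow. */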
+static size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                            U32* nbSymbolsPtr, U32* tableLogPtr,
+                            const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    U32 tableLog;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+    U32 n;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* not necessary, even though some analyzers complain ... */
+
+    if (iSize >= 128)  { /* special header */
+        if (iSize >= (242)) {  /* RLE */
+            static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, hwSize);
+            iSize = 0;
+        }
+        else {   /* Incompressible */
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+            if (oSize >= hwSize) return ERROR(corruption_detected);
+            ip += 1;
+            for (n=0; n<oSize; n+=2) {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }
+    else  {   /* header compressed with FSEv05 (normal case) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSEv05_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSEv05_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUFv05_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+    weightTotal = 0;
+    for (n=0; n<oSize; n++) {
+        if (huffWeight[n] >= HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+        rankStats[huffWeight[n]]++;
+        weightTotal += (1 << huffWeight[n]) >> 1;
+    }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    tableLog = BITv05_highbit32(weightTotal) + 1;
+    if (tableLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+    {   /* determine last weight */
+        U32 total = 1 << tableLog;
+        U32 rest = total - weightTotal;
+        U32 verif = 1 << BITv05_highbit32(rest);
+        U32 lastWeight = BITv05_highbit32(rest) + 1;
+        if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+        huffWeight[oSize] = (BYTE)lastWeight;
+        rankStats[lastWeight]++;
+    }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    *tableLogPtr = tableLog;
+    return iSize+1;
+}
+
+
+/*-***************************/
+/*  single-symbol decoding   */
+/*-***************************/
+
+size_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUFv05_MAX_SYMBOL_VALUE + 1];
+    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    size_t iSize;
+    U32 nbSymbols = 0;
+    U32 n;
+    U32 nextRankStart;
+    void* const dtPtr = DTable + 1;
+    HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr;
+
+    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* not necessary, even though some analyzers complain ... */
+
+    iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUFv05_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
+    DTable[0] = (U16)tableLog;   /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
+
+    /* Prepare ranks */
+    nextRankStart = 0;
+    for (n=1; n<=tableLog; n++) {
+        U32 current = nextRankStart;
+        nextRankStart += (rankVal[n] << (n-1));
+        rankVal[n] = current;
+    }
+
+    /* fill DTable */
+    for (n=0; n<nbSymbols; n++) {
+        const U32 w = huffWeight[n];
+        const U32 length = (1 << w) >> 1;
+        U32 i;
+        HUFv05_DEltX2 D;
+        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+        for (i = rankVal[w]; i < rankVal[w] + length; i++)
+            dt[i] = D;
+        rankVal[w] += length;
+    }
+
+    return iSize;
+}
+
+static BYTE HUFv05_decodeSymbolX2(BITv05_DStream_t* Dstream, const HUFv05_DEltX2* dt, const U32 dtLog)
+{
+        const size_t val = BITv05_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+        const BYTE c = dt[val].byte;
+        BITv05_skipBits(Dstream, dt[val].nbBits);
+        return c;
+}
+
+#define HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUFv05_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUFv05_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
+        HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUFv05_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
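+
+/* note : the _1 and _2 variants decode conditionally, so that the number of
+   table lookups between two reloads never exceeds what the bit container can
+   hold : with a 12-bit max tableLog, a 64-bit container safely serves 4
+   lookups per reload, a 32-bit container only 2 */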
+
+static inline size_t HUFv05_decodeStreamX2(BYTE* p, BITv05_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv05_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-4)) {
+        HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUFv05_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd))
+        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+size_t HUFv05_decompress1X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + dstSize;
+    const U32 dtLog = DTable[0];
+    const void* dtPtr = DTable;
+    const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr)+1;
+    BITv05_DStream_t bitD;
+
+    if (dstSize <= cSrcSize) return ERROR(dstSize_tooSmall);
+    { size_t const errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);
+      if (HUFv05_isError(errorCode)) return errorCode; }
+
+    HUFv05_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+    /* check */
+    if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    return dstSize;
+}
+
+size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+    size_t errorCode;
+
+    errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUFv05_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUFv05_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+size_t HUFv05_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    const BYTE* const istart = (const BYTE*) cSrc;
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* const oend = ostart + dstSize;
+    const void* const dtPtr = DTable;
+    const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
+    const U32 dtLog = DTable[0];
+    size_t errorCode;
+
+    /* Init */
+    BITv05_DStream_t bitD1;
+    BITv05_DStream_t bitD2;
+    BITv05_DStream_t bitD3;
+    BITv05_DStream_t bitD4;
+    const size_t length1 = MEM_readLE16(istart);
+    const size_t length2 = MEM_readLE16(istart+2);
+    const size_t length3 = MEM_readLE16(istart+4);
+    size_t length4;
+    const BYTE* const istart1 = istart + 6;  /* jumpTable */
+    const BYTE* const istart2 = istart1 + length1;
+    const BYTE* const istart3 = istart2 + length2;
+    const BYTE* const istart4 = istart3 + length3;
+    const size_t segmentSize = (dstSize+3) / 4;
+    BYTE* const opStart2 = ostart + segmentSize;
+    BYTE* const opStart3 = opStart2 + segmentSize;
+    BYTE* const opStart4 = opStart3 + segmentSize;
+    BYTE* op1 = ostart;
+    BYTE* op2 = opStart2;
+    BYTE* op3 = opStart3;
+    BYTE* op4 = opStart4;
+    U32 endSignal;
+
+    /* Check */
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    length4 = cSrcSize - (length1 + length2 + length3 + 6);
+    if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+    errorCode = BITv05_initDStream(&bitD1, istart1, length1);
+    if (HUFv05_isError(errorCode)) return errorCode;
+    errorCode = BITv05_initDStream(&bitD2, istart2, length2);
+    if (HUFv05_isError(errorCode)) return errorCode;
+    errorCode = BITv05_initDStream(&bitD3, istart3, length3);
+    if (HUFv05_isError(errorCode)) return errorCode;
+    errorCode = BITv05_initDStream(&bitD4, istart4, length4);
+    if (HUFv05_isError(errorCode)) return errorCode;
+
+    /* 16-32 symbols per loop (4-8 symbols per stream) */
+    endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+    for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
+        HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+        HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+        HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+        HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+        HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
+        HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
+        HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
+        HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
+        HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
+        HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
+        HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
+        HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
+        HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
+        HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
+        HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
+        HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
+        endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+    }
+
+    /* check corruption */
+    if (op1 > opStart2) return ERROR(corruption_detected);
+    if (op2 > opStart3) return ERROR(corruption_detected);
+    if (op3 > opStart4) return ERROR(corruption_detected);
+    /* note : op4 is assumed to have been verified within the main loop */
+
+    /* finish bitStreams one by one */
+    HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+    HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+    HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+    HUFv05_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+    /* check */
+    endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
+    if (!endSignal) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+
+size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+    size_t errorCode;
+
+    errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUFv05_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUFv05_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+
+static void HUFv05_fillDTableX4Level2(HUFv05_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUFv05_DEltX4 DElt;
+    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
+    U32 s;
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1) {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
+        const U32 symbol = sortedSymbols[s].symbol;
+        const U32 weight = sortedSymbols[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 length = 1 << (sizeLog-nbBits);
+        const U32 start = rankVal[weight];
+        U32 i = start;
+        const U32 end = start + length;
+
+        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+        DElt.nbBits = (BYTE)(nbBits + consumed);
+        DElt.length = 2;
+        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+        rankVal[weight] += length;
+    }
+}
+
+typedef U32 rankVal_t[HUFv05_ABSOLUTEMAX_TABLELOG][HUFv05_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++) {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUFv05_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        } else {
+            U32 i;
+            const U32 end = start + length;
+            HUFv05_DEltX4 DElt;
+
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits   = (BYTE)(nbBits);
+            DElt.length   = 1;
+            for (i = start; i < end; i++)
+                DTable[i] = DElt;
+        }
+        rankVal[weight] += length;
+    }
+}
+
+size_t HUFv05_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
+    sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
+    U32 rankStats[HUFv05_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+    U32 rankStart0[HUFv05_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    const U32 memLog = DTable[0];
+    size_t iSize;
+    void* dtPtr = DTable;
+    HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
+
+    HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
+    if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* not necessary, even though some analyzers complain ... */
+
+    iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUFv05_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
+
+    /* Get start index of each weight */
+    {
+        U32 w, nextRankStart = 0;
+        for (w=1; w<=maxW; w++) {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all zero-weight symbols at the end of the sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {
+        U32 s;
+        for (s=0; s<nbSymbols; s++) {
+            U32 w = weightList[s];
+            U32 r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget zero-weight symbols; this is the beginning of weight 1 */
+    }
+
+    /* Build rankVal */
+    {
+        const U32 minBits = tableLog+1 - maxW;
+        U32 nextRankVal = 0;
+        U32 w, consumed;
+        const int rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
+        U32* rankVal0 = rankVal[0];
+        for (w=1; w<=maxW; w++) {
+            U32 current = nextRankVal;
+            nextRankVal += rankStats[w] << (w+rescale);
+            rankVal0[w] = current;
+        }
+        for (consumed = minBits; consumed <= memLog - minBits; consumed++) {
+            U32* rankValPtr = rankVal[consumed];
+            for (w = 1; w <= maxW; w++) {
+                rankValPtr[w] = rankVal0[w] >> consumed;
+    }   }   }
+
+    HUFv05_fillDTableX4(dt, memLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    return iSize;
+}
+
+
+static U32 HUFv05_decodeSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BITv05_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BITv05_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+static U32 HUFv05_decodeLastSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BITv05_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BITv05_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BITv05_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+    }   }
+    return 1;
+}
+
+
+#define HUFv05_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv05_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
+        ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv05_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv05_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd-7)) {
+        HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUFv05_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-2))
+        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUFv05_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+
+size_t HUFv05_decompress1X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    const BYTE* const istart = (const BYTE*) cSrc;
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* const oend = ostart + dstSize;
+
+    const U32 dtLog = DTable[0];
+    const void* const dtPtr = DTable;
+    const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
+    size_t errorCode;
+
+    /* Init */
+    BITv05_DStream_t bitD;
+    errorCode = BITv05_initDStream(&bitD, istart, cSrcSize);
+    if (HUFv05_isError(errorCode)) return errorCode;
+
+    /* finish bitStreams one by one */
+    HUFv05_decodeStreamX4(ostart, &bitD, oend,     dt, dtLog);
+
+    /* check */
+    if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUFv05_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUFv05_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+size_t HUFv05_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {
+        const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BITv05_DStream_t bitD1;
+        BITv05_DStream_t bitD2;
+        BITv05_DStream_t bitD3;
+        BITv05_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BITv05_initDStream(&bitD1, istart1, length1);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD2, istart2, length2);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD3, istart3, length3);
+        if (HUFv05_isError(errorCode)) return errorCode;
+        errorCode = BITv05_initDStream(&bitD4, istart4, length4);
+        if (HUFv05_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+        for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUFv05_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUFv05_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUFv05_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUFv05_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is assumed to have been verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUFv05_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv05_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv05_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv05_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUFv05_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUFv05_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[3] = { HUFv05_decompress4X2, HUFv05_decompress4X4, NULL };
+    /* estimate decompression time */
+    U32 Q;
+    const U32 D256 = (U32)(dstSize >> 8);
+    U32 Dtime[3];
+    U32 algoNb = 0;
+    int n;
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize >= dstSize) return ERROR(corruption_detected);   /* invalid input, or data stored uncompressed ; the uncompressed case must be handled before calling this function */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    /* decoder timing evaluation */
+    Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+    for (n=0; n<3; n++)
+        Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+
+    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
+
+    if (Dtime[1] < Dtime[0]) algoNb = 1;
+
+    return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+
+    //return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
+    //return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
+    //return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
+}
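+/* Worked example of the selector above (illustrative values) :
+ * dstSize = 64 KB, cSrcSize = 24 KB => Q = 24*16/64 = 6, D256 = 65536>>8 = 256
+ *   Dtime[0] =  883 + 128*256 = 33651                       (single-symbol)
+ *   Dtime[1] = 1437 +  74*256 = 20381, + (20381>>4) = 21654 (double-symbols)
+ * 21654 < 33651, so algoNb = 1 and HUFv05_decompress4X4() is selected. */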
+/*
+    zstd - standard compression library
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+*/
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Selects how the default decompression function ZSTDv05_decompress() allocates memory :
+ * on the stack (0), or on the heap (1, requires malloc())
+ */
+#ifndef ZSTDv05_HEAPMODE
+#  define ZSTDv05_HEAPMODE 1
+#endif
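+/* For instance, a stack-allocated context can be requested at compile time,
+ * e.g. `cc -DZSTDv05_HEAPMODE=0 ...` (illustrative flag ; adapt to the build system). */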
+
+
+/*-*******************************************************
+*  Dependencies
+*********************************************************/
+#include <stdlib.h>      /* calloc */
+#include <string.h>      /* memcpy, memmove */
+#include <stdio.h>       /* debug only : printf */
+
+
+/*-*******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#endif
+
+
+/*-*************************************
+*  Local types
+***************************************/
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+
+/* *******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTDv05_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/* *************************************
+*  Error Management
+***************************************/
+/*! ZSTDv05_isError() :
+*   tells if a return value is an error code */
+unsigned ZSTDv05_isError(size_t code) { return ERR_isError(code); }
+
+
+/*! ZSTDv05_getErrorName() :
+*   provides error code string (useful for debugging) */
+const char* ZSTDv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* *************************************************************
+*   Context management
+***************************************************************/
+typedef enum { ZSTDv05ds_getFrameHeaderSize, ZSTDv05ds_decodeFrameHeader,
+               ZSTDv05ds_decodeBlockHeader, ZSTDv05ds_decompressBlock } ZSTDv05_dStage;
+
+struct ZSTDv05_DCtx_s
+{
+    FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)];
+    FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)];
+    FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)];
+    unsigned   hufTableX4[HUFv05_DTABLE_SIZE(HufLog)];
+    const void* previousDstEnd;
+    const void* base;
+    const void* vBase;
+    const void* dictEnd;
+    size_t expected;
+    size_t headerSize;
+    ZSTDv05_parameters params;
+    blockType_t bType;   /* used in ZSTDv05_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+    ZSTDv05_dStage stage;
+    U32 flagStaticTables;
+    const BYTE* litPtr;
+    size_t litSize;
+    BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];
+    BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];
+};  /* typedef'd to ZSTDv05_DCtx within "zstd_static.h" */
+
+size_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); }
+
+size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx)
+{
+    dctx->expected = ZSTDv05_frameHeaderSize_min;
+    dctx->stage = ZSTDv05ds_getFrameHeaderSize;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    dctx->vBase = NULL;
+    dctx->dictEnd = NULL;
+    dctx->hufTableX4[0] = HufLog;
+    dctx->flagStaticTables = 0;
+    return 0;
+}
+
+ZSTDv05_DCtx* ZSTDv05_createDCtx(void)
+{
+    ZSTDv05_DCtx* dctx = (ZSTDv05_DCtx*)malloc(sizeof(ZSTDv05_DCtx));
+    if (dctx==NULL) return NULL;
+    ZSTDv05_decompressBegin(dctx);
+    return dctx;
+}
+
+size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx)
+{
+    free(dctx);
+    return 0;   /* reserved as a potential error code in the future */
+}
+
+void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx)
+{
+    memcpy(dstDCtx, srcDCtx,
+           sizeof(ZSTDv05_DCtx) - (BLOCKSIZE+WILDCOPY_OVERLENGTH + ZSTDv05_frameHeaderSize_max));  /* no need to copy workspace */
+}
+
+
+/* *************************************************************
+*   Decompression section
+***************************************************************/
+
+/* Frame format description
+   Frame Header -  [ Block Header - Block ] - Frame End
+   1) Frame Header
+      - 4 bytes - Magic Number : ZSTDv05_MAGICNUMBER (defined within zstd_internal.h)
+      - 1 byte  - Window Descriptor
+   2) Block Header
+      - 3 bytes, starting with a 2-bits descriptor
+                 Uncompressed, Compressed, Frame End, unused
+   3) Block
+      See Block Format Description
+   4) Frame End
+      - 3 bytes, compatible with Block Header
+*/
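+/* Illustrative frame header bytes (hypothetical values) :
+ *   bytes 0-3 : ZSTDv05_MAGICNUMBER, little-endian
+ *   byte  4   : Window Descriptor ; the low 4 bits give the windowLog offset,
+ *               e.g. 0x08 => windowLog = 8 + ZSTDv05_WINDOWLOG_ABSOLUTEMIN,
+ *               while the high 4 bits are reserved and must be 0
+ *               (see ZSTDv05_getFrameParams() below). */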
+
+/* Block format description
+
+   Block = Literal Section - Sequences Section
+   Prerequisite : size of (compressed) block, maximum size of regenerated data
+
+   1) Literal Section
+
+   1.1) Header : 1-5 bytes
+        flags: 2 bits
+            00 compressed by Huff0
+            01 unused
+            10 is Raw (uncompressed)
+            11 is Rle
+            Note : using 01 => Huff0 with precomputed table ?
+            Note : delta map ? => compressed ?
+
+   1.1.1) Huff0-compressed literal block : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+            srcSize < 16KB => 4 bytes (2-2-14-14)
+            else           => 5 bytes (2-2-18-18)
+            big endian convention
+
+   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
+        size :  5 bits: (IS_RAW<<6) + (0<<4) + size
+               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
+                        size&255
+               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
+                        size>>8&255
+                        size&255
+
+   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
+        size :  5 bits: (IS_RLE<<6) + (0<<4) + size
+               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
+                        size&255
+               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
+                        size>>8&255
+                        size&255
+
+   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+            srcSize < 16KB => 4 bytes (2-2-14-14)
+            else           => 5 bytes (2-2-18-18)
+            big endian convention
+
+        1- CTable available (stored into workspace ?)
+        2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
+
+
+   1.2) Literal block content
+
+   1.2.1) Huff0 block, using sizes from header
+        See Huff0 format
+
+   1.2.2) Huff0 block, using prepared table
+
+   1.2.3) Raw content
+
+   1.2.4) single byte
+
+
+   2) Sequences section
+      TO DO
+*/
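+/* Worked example for 1.1.2 (illustrative) : a Raw literal block of 300 bytes
+ * uses the 12-bit form, i.e. two header bytes :
+ *   byte 0 = (IS_RAW<<6) + (2<<4) + (300>>8)   => low nibble = 1
+ *   byte 1 = 300 & 255                         => 44
+ * ZSTDv05_decodeLiteralsBlock() (case IS_RAW, lhSize==2) recomputes
+ * litSize = (1<<8) + 44 = 300. */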
+
+
+/** ZSTDv05_decodeFrameHeader_Part1() :
+*   decode the 1st part of the Frame Header, which tells Frame Header size.
+*   srcSize must be == ZSTDv05_frameHeaderSize_min.
+*   @return : the full size of the Frame Header */
+static size_t ZSTDv05_decodeFrameHeader_Part1(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
+{
+    U32 magicNumber;
+    if (srcSize != ZSTDv05_frameHeaderSize_min)
+        return ERROR(srcSize_wrong);
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
+    zc->headerSize = ZSTDv05_frameHeaderSize_min;
+    return zc->headerSize;
+}
+
+
+size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize)
+{
+    U32 magicNumber;
+    if (srcSize < ZSTDv05_frameHeaderSize_min) return ZSTDv05_frameHeaderSize_max;
+    magicNumber = MEM_readLE32(src);
+    if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
+    memset(params, 0, sizeof(*params));
+    params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;
+    if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits */
+    return 0;
+}
+
+/** ZSTDv05_decodeFrameHeader_Part2() :
+*   decode the full Frame Header.
+*   srcSize must be the size provided by ZSTDv05_decodeFrameHeader_Part1().
+*   @return : 0, or an error code, which can be tested using ZSTDv05_isError() */
+static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
+{
+    size_t result;
+    if (srcSize != zc->headerSize)
+        return ERROR(srcSize_wrong);
+    result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);
+    if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
+    return result;
+}
+
+
+size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    BYTE headerFlags;
+    U32 cSize;
+
+    if (srcSize < 3)
+        return ERROR(srcSize_wrong);
+
+    headerFlags = *in;
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+
+    bpPtr->blockType = (blockType_t)(headerFlags >> 6);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
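+/* Worked example (illustrative) : header bytes {0x01, 0x23, 0x45}
+ * => headerFlags = 0x01, blockType = headerFlags>>6 = (blockType_t)0
+ * => cSize = 0x45 + (0x23<<8) + ((0x01 & 7)<<16) = 0x012345 = 74565 bytes. */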
+
+
+static size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+/*! ZSTDv05_decodeLiteralsBlock() :
+    @return : nb of bytes read from src (< srcSize) */
+size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    const BYTE* const istart = (const BYTE*) src;
+
+    /* any compressed block with a literals segment must be at least this size */
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    switch(istart[0]>> 6)
+    {
+    case IS_HUFv05:
+        {
+            size_t litSize, litCSize, singleStream=0;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
+                /* 2 - 2 - 10 - 10 */
+                lhSize=3;
+                singleStream = istart[0] & 16;
+                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+                litCSize = ((istart[1] &  3) << 8) + istart[2];
+                break;
+            case 2:
+                /* 2 - 2 - 14 - 14 */
+                lhSize=4;
+                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
+                litCSize = ((istart[2] & 63) <<  8) + istart[3];
+                break;
+            case 3:
+                /* 2 - 2 - 18 - 18 */
+                lhSize=5;
+                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
+                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
+                break;
+            }
+            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+            if (HUFv05_isError(singleStream ?
+                            HUFv05_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+                            HUFv05_decompress   (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
+                return ERROR(corruption_detected);
+
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+            return litCSize + lhSize;
+        }
+    case IS_PCH:
+        {
+            size_t errorCode;
+            size_t litSize, litCSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */
+                return ERROR(corruption_detected);
+            if (!dctx->flagStaticTables)
+                return ERROR(dictionary_corrupted);
+
+            /* 2 - 2 - 10 - 10 */
+            lhSize=3;
+            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+            litCSize = ((istart[1] &  3) << 8) + istart[2];
+            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+            errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
+            if (HUFv05_isError(errorCode)) return ERROR(corruption_detected);
+
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+            return litCSize + lhSize;
+        }
+    case IS_RAW:
+        {
+            size_t litSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
+                lhSize=1;
+                litSize = istart[0] & 31;
+                break;
+            case 2:
+                litSize = ((istart[0] & 15) << 8) + istart[1];
+                break;
+            case 3:
+                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+                break;
+            }
+
+            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
+                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+                memcpy(dctx->litBuffer, istart+lhSize, litSize);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                return lhSize+litSize;
+            }
+            /* direct reference into compressed stream */
+            dctx->litPtr = istart+lhSize;
+            dctx->litSize = litSize;
+            return lhSize+litSize;
+        }
+    case IS_RLE:
+        {
+            size_t litSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
+                lhSize = 1;
+                litSize = istart[0] & 31;
+                break;
+            case 2:
+                litSize = ((istart[0] & 15) << 8) + istart[1];
+                break;
+            case 3:
+                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+                break;
+            }
+            if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
+            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            return lhSize+1;
+        }
+    default:
+        return ERROR(corruption_detected);   /* impossible */
+    }
+}
+
+
+size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
+                         FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb,
+                         const void* src, size_t srcSize, U32 flagStaticTable)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* ip = istart;
+    const BYTE* const iend = istart + srcSize;
+    U32 LLtype, Offtype, MLtype;
+    U32 LLlog, Offlog, MLlog;
+    size_t dumpsLength;
+
+    /* check */
+    if (srcSize < MIN_SEQUENCES_SIZE)
+        return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    *nbSeq = *ip++;
+    if (*nbSeq==0) return 1;
+    if (*nbSeq >= 128) {
+        if (ip >= iend) return ERROR(srcSize_wrong);
+        *nbSeq = ((nbSeq[0]-128)<<8) + *ip++;
+    }
+
+    if (ip >= iend) return ERROR(srcSize_wrong);
+    LLtype  = *ip >> 6;
+    Offtype = (*ip >> 4) & 3;
+    MLtype  = (*ip >> 2) & 3;
+    if (*ip & 2) {
+        if (ip+3 > iend) return ERROR(srcSize_wrong);
+        dumpsLength  = ip[2];
+        dumpsLength += ip[1] << 8;
+        ip += 3;
+    } else {
+        if (ip+2 > iend) return ERROR(srcSize_wrong);
+        dumpsLength  = ip[1];
+        dumpsLength += (ip[0] & 1) << 8;
+        ip += 2;
+    }
+    *dumpsPtr = ip;
+    ip += dumpsLength;
+    *dumpsLengthPtr = dumpsLength;
+
+    /* check */
+    if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+    /* sequences */
+    {
+        S16 norm[MaxML+1];    /* assumption : MaxML >= MaxLL >= MaxOff */
+        size_t headerSize;
+
+        /* Build DTables */
+        switch(LLtype)
+        {
+        case FSEv05_ENCODING_RLE :
+            LLlog = 0;
+            FSEv05_buildDTable_rle(DTableLL, *ip++);
+            break;
+        case FSEv05_ENCODING_RAW :
+            LLlog = LLbits;
+            FSEv05_buildDTable_raw(DTableLL, LLbits);
+            break;
+        case FSEv05_ENCODING_STATIC:
+            if (!flagStaticTable) return ERROR(corruption_detected);
+            break;
+        case FSEv05_ENCODING_DYNAMIC :
+        default :   /* impossible */
+            {   U32 max = MaxLL;
+                headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
+                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
+                if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSEv05_buildDTable(DTableLL, norm, max, LLlog);
+        }   }
+
+        switch(Offtype)
+        {
+        case FSEv05_ENCODING_RLE :
+            Offlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong);   /* min : "raw", hence no header, but at least xxLog bits */
+            FSEv05_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
+            break;
+        case FSEv05_ENCODING_RAW :
+            Offlog = Offbits;
+            FSEv05_buildDTable_raw(DTableOffb, Offbits);
+            break;
+        case FSEv05_ENCODING_STATIC:
+            if (!flagStaticTable) return ERROR(corruption_detected);
+            break;
+        case FSEv05_ENCODING_DYNAMIC :
+        default :   /* impossible */
+            {   U32 max = MaxOff;
+                headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
+                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
+                if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSEv05_buildDTable(DTableOffb, norm, max, Offlog);
+        }   }
+
+        switch(MLtype)
+        {
+        case FSEv05_ENCODING_RLE :
+            MLlog = 0;
+            if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
+            FSEv05_buildDTable_rle(DTableML, *ip++);
+            break;
+        case FSEv05_ENCODING_RAW :
+            MLlog = MLbits;
+            FSEv05_buildDTable_raw(DTableML, MLbits);
+            break;
+        case FSEv05_ENCODING_STATIC:
+            if (!flagStaticTable) return ERROR(corruption_detected);
+            break;
+        case FSEv05_ENCODING_DYNAMIC :
+        default :   /* impossible */
+            {   U32 max = MaxML;
+                headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
+                if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
+                if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
+                ip += headerSize;
+                FSEv05_buildDTable(DTableML, norm, max, MLlog);
+    }   }   }
+
+    return ip-istart;
+}
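+/* Worked example for the SeqHead parsing above (illustrative) : a first byte
+ * of 0x85 (>= 128) announces a second byte ; with 0x10 following,
+ * nbSeq = ((0x85-128)<<8) + 0x10 = 5*256 + 16 = 1296 sequences. */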
+
+
+typedef struct {
+    size_t litLength;
+    size_t matchLength;
+    size_t offset;
+} seq_t;
+
+typedef struct {
+    BITv05_DStream_t DStream;
+    FSEv05_DState_t stateLL;
+    FSEv05_DState_t stateOffb;
+    FSEv05_DState_t stateML;
+    size_t prevOffset;
+    const BYTE* dumps;
+    const BYTE* dumpsEnd;
+} seqState_t;
+
+
+
+static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+    size_t litLength;
+    size_t prevOffset;
+    size_t offset;
+    size_t matchLength;
+    const BYTE* dumps = seqState->dumps;
+    const BYTE* const de = seqState->dumpsEnd;
+
+    /* Literal length */
+    litLength = FSEv05_peakSymbol(&(seqState->stateLL));
+    prevOffset = litLength ? seq->offset : seqState->prevOffset;
+    if (litLength == MaxLL) {
+        U32 add = *dumps++;
+        if (add < 255) litLength += add;
+        else {
+            litLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no risk : dumps is always followed by the seq tables, which are > 1 byte */
+            if (litLength&1) litLength>>=1, dumps += 3;
+            else litLength = (U16)(litLength)>>1, dumps += 2;
+        }
+        if (dumps > de) { litLength = MaxLL+255; }  /* late correction, to avoid using uninitialized memory */
+        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+
+    /* Offset */
+    {
+        static const U32 offsetPrefix[MaxOff+1] = {
+                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
+                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
+                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
+        U32 offsetCode = FSEv05_peakSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */
+        U32 nbBits = offsetCode - 1;
+        if (offsetCode==0) nbBits = 0;   /* cmove */
+        offset = offsetPrefix[offsetCode] + BITv05_readBits(&(seqState->DStream), nbBits);
+        if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
+        if (offsetCode==0) offset = prevOffset;   /* repcode, cmove */
+        if (offsetCode | !litLength) seqState->prevOffset = seq->offset;   /* cmove */
+        FSEv05_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));    /* update */
+    }
+
+    /* Literal length update */
+    FSEv05_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));   /* update */
+    if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
+
+    /* MatchLength */
+    matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
+    if (matchLength == MaxML) {
+        U32 add = *dumps++;
+        if (add < 255) matchLength += add;
+        else {
+            matchLength = MEM_readLE32(dumps) & 0xFFFFFF;  /* no problem : dumps is always followed by the seq tables, which are > 1 byte */
+            if (matchLength&1) matchLength>>=1, dumps += 3;
+            else matchLength = (U16)(matchLength)>>1, dumps += 2;
+        }
+        if (dumps > de) { matchLength = MaxML+255; }  /* late correction, to avoid using uninitialized memory */
+        if (dumps >= de) { dumps = de-1; }  /* late correction, to avoid read overflow (data is now corrupted anyway) */
+    }
+    matchLength += MINMATCH;
+
+    /* save result */
+    seq->litLength = litLength;
+    seq->offset = offset;
+    seq->matchLength = matchLength;
+    seqState->dumps = dumps;
+
+#if 0   /* debug */
+    {
+        static U64 totalDecoded = 0;
+        printf("pos %6u : %3u literals & match %3u bytes at distance %6u \n",
+           (U32)(totalDecoded), (U32)litLength, (U32)matchLength, (U32)offset);
+        totalDecoded += litLength + matchLength;
+    }
+#endif
+}
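+/* Worked example for the offset decoding above (illustrative) :
+ * offsetCode = 5 => nbBits = 4, offset = offsetPrefix[5] + 4 bits read
+ *                = 16 + [0..15], i.e. any offset in [16..31] ;
+ * offsetCode = 0 => offset = prevOffset (repcode). */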
+
+
+static size_t ZSTDv05_execSequence(BYTE* op,
+                                BYTE* const oend, seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+    static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+    BYTE* const oLitEnd = op + sequence.litLength;
+    const size_t sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_8 = oend-8;
+    const BYTE* const litEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
+    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
+    if (litEnd > litLimit) return ERROR(corruption_detected);   /* risk read beyond lit buffer */
+
+    /* copy Literals */
+    ZSTDv05_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = litEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase))
+            return ERROR(corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {
+            size_t length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_8 || sequence.matchLength < MINMATCH) {
+              while (op < oMatchEnd) *op++ = *match++;
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_8 */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        const int sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTDv05_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTDv05_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_8) {
+            ZSTDv05_wildcopy(op, match, oend_8 - op);
+            match += oend_8 - op;
+            op = oend_8;
+        }
+        while (op < oMatchEnd)
+            *op++ = *match++;
+    } else {
+        ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+
+static size_t ZSTDv05_decompressSequences(
+                               ZSTDv05_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t errorCode, dumpsLength;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    int nbSeq;
+    const BYTE* dumps;
+    U32* DTableLL = dctx->LLTable;
+    U32* DTableML = dctx->MLTable;
+    U32* DTableOffb = dctx->OffTable;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+
+    /* Build Decoding Tables */
+    errorCode = ZSTDv05_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
+                                      DTableLL, DTableML, DTableOffb,
+                                      ip, seqSize, dctx->flagStaticTables);
+    if (ZSTDv05_isError(errorCode)) return errorCode;
+    ip += errorCode;
+
+    /* Regen sequences */
+    if (nbSeq) {
+        seq_t sequence;
+        seqState_t seqState;
+
+        memset(&sequence, 0, sizeof(sequence));
+        sequence.offset = REPCODE_STARTVALUE;
+        seqState.dumps = dumps;
+        seqState.dumpsEnd = dumps + dumpsLength;
+        seqState.prevOffset = REPCODE_STARTVALUE;
+        errorCode = BITv05_initDStream(&(seqState.DStream), ip, iend-ip);
+        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
+        FSEv05_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSEv05_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSEv05_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (BITv05_reloadDStream(&(seqState.DStream)) <= BITv05_DStream_completed) && nbSeq ; ) {
+            size_t oneSeqSize;
+            nbSeq--;
+            ZSTDv05_decodeSequence(&sequence, &seqState);
+            oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+            if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize;
+            op += oneSeqSize;
+        }
+
+        /* check if reached exact end */
+        if (nbSeq) return ERROR(corruption_detected);
+    }
+
+    /* last literal segment */
+    {
+        size_t lastLLSize = litEnd - litPtr;
+        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
+        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+
+static void ZSTDv05_checkContinuity(ZSTDv05_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd) {   /* not contiguous */
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+        dctx->base = dst;
+        dctx->previousDstEnd = dst;
+    }
+}
+
+
+static size_t ZSTDv05_decompressBlock_internal(ZSTDv05_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{   /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+    size_t litCSize;
+
+    if (srcSize >= BLOCKSIZE) return ERROR(srcSize_wrong);
+
+    /* Decode literals sub-block */
+    litCSize = ZSTDv05_decodeLiteralsBlock(dctx, src, srcSize);
+    if (ZSTDv05_isError(litCSize)) return litCSize;
+    ip += litCSize;
+    srcSize -= litCSize;
+
+    return ZSTDv05_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+
+size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    ZSTDv05_checkContinuity(dctx, dst);
+    return ZSTDv05_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+/*! ZSTDv05_decompress_continueDCtx
+*   dctx must have been properly initialized */
+static size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                                 const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + maxDstSize;
+    size_t remainingSize = srcSize;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    {
+        size_t frameHeaderSize;
+        if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
+        frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
+        if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
+        if (srcSize < frameHeaderSize+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+        frameHeaderSize = ZSTDv05_decodeFrameHeader_Part2(dctx, src, frameHeaderSize);
+        if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t decodedSize=0;
+        size_t cBlockSize = ZSTDv05_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTDv05_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTDv05_blockHeaderSize;
+        remainingSize -= ZSTDv05_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTDv05_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTDv05_copyRawBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet supported */
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            break;
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        if (ZSTDv05_isError(decodedSize)) return decodedSize;
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return op-ostart;
+}
+
+
+size_t ZSTDv05_decompress_usingPreparedDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* refDCtx,
+                                         void* dst, size_t maxDstSize,
+                                   const void* src, size_t srcSize)
+{
+    ZSTDv05_copyDCtx(dctx, refDCtx);
+    ZSTDv05_checkContinuity(dctx, dst);
+    return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
+}
+
+
+size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
+                                 void* dst, size_t maxDstSize,
+                                 const void* src, size_t srcSize,
+                                 const void* dict, size_t dictSize)
+{
+    ZSTDv05_decompressBegin_usingDict(dctx, dict, dictSize);
+    ZSTDv05_checkContinuity(dctx, dst);
+    return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
+}
+
+
+size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    return ZSTDv05_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
+}
+
+size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+#if defined(ZSTDv05_HEAPMODE) && (ZSTDv05_HEAPMODE==1)
+    size_t regenSize;
+    ZSTDv05_DCtx* dctx = ZSTDv05_createDCtx();
+    if (dctx==NULL) return ERROR(memory_allocation);
+    regenSize = ZSTDv05_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
+    ZSTDv05_freeDCtx(dctx);
+    return regenSize;
+#else
+    ZSTDv05_DCtx dctx;
+    return ZSTDv05_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
+#endif
+}
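+/* One-shot usage sketch (illustrative caller, not part of the upstream
+ * sources ; cBuff/cSize and the dst capacity are assumed caller-provided). */
+#if 0
+static size_t exampleOneShot(void* dst, size_t dstCapacity,
+                             const void* cBuff, size_t cSize)
+{
+    size_t const rSize = ZSTDv05_decompress(dst, dstCapacity, cBuff, cSize);
+    if (ZSTDv05_isError(rSize)) {
+        /* e.g. prefix_unknown if cBuff does not start with ZSTDv05_MAGICNUMBER */
+        printf("decompression error : %s \n", ZSTDv05_getErrorName(rSize));
+        return 0;
+    }
+    return rSize;   /* nb of regenerated bytes */
+}
+#endif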
+
+size_t ZSTDv05_findFrameCompressedSize(const void *src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+    blockProperties_t blockProperties;
+
+    /* Frame Header */
+    if (srcSize < ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong);
+    if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
+    ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
+
+    /* Loop on each block */
+    while (1)
+    {
+        size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTDv05_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTDv05_blockHeaderSize;
+        remainingSize -= ZSTDv05_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+/* ******************************
+*  Streaming Decompression API
+********************************/
+size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx)
+{
+    return dctx->expected;
+}
+
+size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+    ZSTDv05_checkContinuity(dctx, dst);
+
+    /* Decompress : frame header; part 1 */
+    switch (dctx->stage)
+    {
+    case ZSTDv05ds_getFrameHeaderSize :
+        /* get frame header size */
+        if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
+        dctx->headerSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
+        if (ZSTDv05_isError(dctx->headerSize)) return dctx->headerSize;
+        memcpy(dctx->headerBuffer, src, ZSTDv05_frameHeaderSize_min);
+        if (dctx->headerSize > ZSTDv05_frameHeaderSize_min) return ERROR(GENERIC); /* should never happen */
+        dctx->expected = 0;   /* not necessary to copy more */
+        /* fallthrough */
+    case ZSTDv05ds_decodeFrameHeader:
+        /* get frame header */
+        {   size_t const result = ZSTDv05_decodeFrameHeader_Part2(dctx, dctx->headerBuffer, dctx->headerSize);
+            if (ZSTDv05_isError(result)) return result;
+            dctx->expected = ZSTDv05_blockHeaderSize;
+            dctx->stage = ZSTDv05ds_decodeBlockHeader;
+            return 0;
+        }
+    case ZSTDv05ds_decodeBlockHeader:
+        {
+            /* Decode block header */
+            blockProperties_t bp;
+            size_t blockSize = ZSTDv05_getcBlockSize(src, ZSTDv05_blockHeaderSize, &bp);
+            if (ZSTDv05_isError(blockSize)) return blockSize;
+            if (bp.blockType == bt_end) {
+                dctx->expected = 0;
+                dctx->stage = ZSTDv05ds_getFrameHeaderSize;
+            }
+            else {
+                dctx->expected = blockSize;
+                dctx->bType = bp.blockType;
+                dctx->stage = ZSTDv05ds_decompressBlock;
+            }
+            return 0;
+        }
+    case ZSTDv05ds_decompressBlock:
+        {
+            /* Decompress : block content */
+            size_t rSize;
+            switch(dctx->bType)
+            {
+            case bt_compressed:
+                rSize = ZSTDv05_decompressBlock_internal(dctx, dst, maxDstSize, src, srcSize);
+                break;
+            case bt_raw :
+                rSize = ZSTDv05_copyRawBlock(dst, maxDstSize, src, srcSize);
+                break;
+            case bt_rle :
+                return ERROR(GENERIC);   /* not yet handled */
+                break;
+            case bt_end :   /* should never happen (filtered at phase 1) */
+                rSize = 0;
+                break;
+            default:
+                return ERROR(GENERIC);   /* impossible */
+            }
+            dctx->stage = ZSTDv05ds_decodeBlockHeader;
+            dctx->expected = ZSTDv05_blockHeaderSize;
+            dctx->previousDstEnd = (char*)dst + rSize;
+            return rSize;
+        }
+    default:
+        return ERROR(GENERIC);   /* impossible */
+    }
+}
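+/* Streaming usage sketch (illustrative caller, not part of the upstream
+ * sources) : feed ZSTDv05_decompressContinue() exactly the nb of bytes
+ * announced by ZSTDv05_nextSrcSizeToDecompress(). */
+#if 0
+static size_t exampleStreaming(ZSTDv05_DCtx* dctx,
+                               void* dst, size_t dstCapacity,
+                               const BYTE* src, size_t srcSize)
+{
+    BYTE* const ostart = (BYTE*)dst;
+    BYTE* op = ostart;
+    ZSTDv05_decompressBegin(dctx);
+    while (1) {
+        size_t decoded;
+        size_t const toRead = ZSTDv05_nextSrcSizeToDecompress(dctx);
+        if (toRead == 0) break;                        /* frame fully decoded */
+        if (toRead > srcSize) return ERROR(srcSize_wrong);
+        decoded = ZSTDv05_decompressContinue(dctx, op, dstCapacity - (size_t)(op-ostart), src, toRead);
+        if (ZSTDv05_isError(decoded)) return decoded;
+        src += toRead; srcSize -= toRead;
+        op += decoded;                                 /* 0 while decoding headers */
+    }
+    return (size_t)(op - ostart);
+}
+#endif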
+
+
+static void ZSTDv05_refDictContent(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    dctx->dictEnd = dctx->previousDstEnd;
+    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+    dctx->base = dict;
+    dctx->previousDstEnd = (const char*)dict + dictSize;
+}
+
+static size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
+    short offcodeNCount[MaxOff+1];
+    U32 offcodeMaxValue=MaxOff, offcodeLog;
+    short matchlengthNCount[MaxML+1];
+    unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+    short litlengthNCount[MaxLL+1];
+    unsigned litlengthMaxValue = MaxLL, litlengthLog;
+
+    hSize = HUFv05_readDTableX4(dctx->hufTableX4, dict, dictSize);
+    if (HUFv05_isError(hSize)) return ERROR(dictionary_corrupted);
+    dict = (const char*)dict + hSize;
+    dictSize -= hSize;
+
+    offcodeHeaderSize = FSEv05_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
+    if (FSEv05_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+    if (offcodeLog > OffFSEv05Log) return ERROR(dictionary_corrupted);
+    errorCode = FSEv05_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
+    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+    dict = (const char*)dict + offcodeHeaderSize;
+    dictSize -= offcodeHeaderSize;
+
+    matchlengthHeaderSize = FSEv05_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
+    if (FSEv05_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+    if (matchlengthLog > MLFSEv05Log) return ERROR(dictionary_corrupted);
+    errorCode = FSEv05_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
+    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+    dict = (const char*)dict + matchlengthHeaderSize;
+    dictSize -= matchlengthHeaderSize;
+
+    litlengthHeaderSize = FSEv05_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
+    if (litlengthLog > LLFSEv05Log) return ERROR(dictionary_corrupted);
+    if (FSEv05_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+    errorCode = FSEv05_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
+    if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+
+    dctx->flagStaticTables = 1;
+    return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
+}
+
+static size_t ZSTDv05_decompress_insertDictionary(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    size_t eSize;
+    U32 magic = MEM_readLE32(dict);
+    if (magic != ZSTDv05_DICT_MAGIC) {
+        /* pure content mode */
+        ZSTDv05_refDictContent(dctx, dict, dictSize);
+        return 0;
+    }
+    /* load entropy tables */
+    dict = (const char*)dict + 4;
+    dictSize -= 4;
+    eSize = ZSTDv05_loadEntropy(dctx, dict, dictSize);
+    if (ZSTDv05_isError(eSize)) return ERROR(dictionary_corrupted);
+
+    /* reference dictionary content */
+    dict = (const char*)dict + eSize;
+    dictSize -= eSize;
+    ZSTDv05_refDictContent(dctx, dict, dictSize);
+
+    return 0;
+}
+
+
+size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    size_t errorCode;
+    errorCode = ZSTDv05_decompressBegin(dctx);
+    if (ZSTDv05_isError(errorCode)) return errorCode;
+
+    if (dict && dictSize) {
+        errorCode = ZSTDv05_decompress_insertDictionary(dctx, dict, dictSize);
+        if (ZSTDv05_isError(errorCode)) return ERROR(dictionary_corrupted);
+    }
+
+    return 0;
+}
+
+/*
+    Buffered version of Zstd compression library
+    Copyright (C) 2015-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd source repository : https://github.com/Cyan4973/zstd
+    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* The objects defined into this file should be considered experimental.
+ * They are not labelled stable, as their prototype may change in the future.
+ * You can use them for tests, provide feedback, or if you can endure risk of future changes.
+ */
+
+
+
+/* *************************************
+*  Constants
+***************************************/
+static size_t ZBUFFv05_blockHeaderSize = 3;
+
+
+
+/* *** Compression *** */
+
+static size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
+{
+    size_t length = MIN(maxDstSize, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+
+
+
+/** ************************************************
+*  Streaming decompression
+*
+*  A ZBUFFv05_DCtx object is required to track streaming operation.
+*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
+*  Use ZBUFFv05_decompressInit() to start a new decompression operation.
+*  ZBUFFv05_DCtx objects can be reused multiple times.
+*
+*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.
+*  *srcSizePtr and *maxDstSizePtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
+*  The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
+*  @return : a hint to the preferred nb of bytes to use as input for the next function call (it's only a hint, to improve latency)
+*            or 0 when a frame is completely decoded
+*            or an error code, which can be tested using ZBUFFv05_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory)
+*  output : 128 KB is the internal block unit ; it ensures a fully decoded block can always be written out.
+*  input : just follow the indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* **************************************************/
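+/* Driver-loop sketch for the API described above (illustrative caller,
+ * not part of the upstream sources ; buffer names are hypothetical). */
+#if 0
+static size_t exampleZBUFFLoop(ZBUFFv05_DCtx* zbd,
+                               char* dstPtr, char* const dstEnd,
+                               const char* srcPtr, const char* const srcEnd)
+{
+    size_t hint = ZBUFFv05_decompressInit(zbd);
+    if (ZBUFFv05_isError(hint)) return hint;
+    while (srcPtr < srcEnd) {
+        size_t srcLeft = (size_t)(srcEnd - srcPtr);
+        size_t dstLeft = (size_t)(dstEnd - dstPtr);
+        hint = ZBUFFv05_decompressContinue(zbd, dstPtr, &dstLeft, srcPtr, &srcLeft);
+        if (ZBUFFv05_isError(hint)) return hint;
+        srcPtr += srcLeft;     /* on return, *srcSizePtr = nb of bytes consumed */
+        dstPtr += dstLeft;     /* on return, *maxDstSizePtr = nb of bytes written */
+        if (hint == 0) break;  /* frame completely decoded */
+    }
+    return hint;
+}
+#endif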
+
+typedef enum { ZBUFFv05ds_init, ZBUFFv05ds_readHeader, ZBUFFv05ds_loadHeader, ZBUFFv05ds_decodeHeader,
+               ZBUFFv05ds_read, ZBUFFv05ds_load, ZBUFFv05ds_flush } ZBUFFv05_dStage;
+
+/* *** Resource management *** */
+
+#define ZSTDv05_frameHeaderSize_max 5   /* magic constant ; should be taken from a shared reference header */
+struct ZBUFFv05_DCtx_s {
+    ZSTDv05_DCtx* zc;
+    ZSTDv05_parameters params;
+    char* inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    char* outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t hPos;
+    ZBUFFv05_dStage stage;
+    unsigned char headerBuffer[ZSTDv05_frameHeaderSize_max];
+};   /* typedef'd to ZBUFFv05_DCtx within "zstd_buffered.h" */
+
+
+ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void)
+{
+    ZBUFFv05_DCtx* zbc = (ZBUFFv05_DCtx*)malloc(sizeof(ZBUFFv05_DCtx));
+    if (zbc==NULL) return NULL;
+    memset(zbc, 0, sizeof(*zbc));
+    zbc->zc = ZSTDv05_createDCtx();
+    zbc->stage = ZBUFFv05ds_init;
+    return zbc;
+}
+
+size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* zbc)
+{
+    if (zbc==NULL) return 0;   /* support free on null */
+    ZSTDv05_freeDCtx(zbc->zc);
+    free(zbc->inBuff);
+    free(zbc->outBuff);
+    free(zbc);
+    return 0;
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* zbc, const void* dict, size_t dictSize)
+{
+    zbc->stage = ZBUFFv05ds_readHeader;
+    zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = 0;
+    return ZSTDv05_decompressBegin_usingDict(zbc->zc, dict, dictSize);
+}
+
+size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* zbc)
+{
+    return ZBUFFv05_decompressInitDictionary(zbc, NULL, 0);
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
+{
+    const char* const istart = (const char*)src;
+    const char* ip = istart;
+    const char* const iend = istart + *srcSizePtr;
+    char* const ostart = (char*)dst;
+    char* op = ostart;
+    char* const oend = ostart + *maxDstSizePtr;
+    U32 notDone = 1;
+
+    while (notDone) {
+        switch(zbc->stage)
+        {
+        case ZBUFFv05ds_init :
+            return ERROR(init_missing);
+
+        case ZBUFFv05ds_readHeader :
+            /* read header from src */
+            {
+                size_t headerSize = ZSTDv05_getFrameParams(&(zbc->params), src, *srcSizePtr);
+                if (ZSTDv05_isError(headerSize)) return headerSize;
+                if (headerSize) {
+                    /* not enough input to decode header : tell how many bytes would be necessary */
+                    memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
+                    zbc->hPos += *srcSizePtr;
+                    *maxDstSizePtr = 0;
+                    zbc->stage = ZBUFFv05ds_loadHeader;
+                    return headerSize - zbc->hPos;
+                }
+                zbc->stage = ZBUFFv05ds_decodeHeader;
+                break;
+            }
+        /* fall-through */
+        case ZBUFFv05ds_loadHeader:
+            /* complete header from src */
+            {
+                size_t headerSize = ZBUFFv05_limitCopy(
+                    zbc->headerBuffer + zbc->hPos, ZSTDv05_frameHeaderSize_max - zbc->hPos,
+                    src, *srcSizePtr);
+                zbc->hPos += headerSize;
+                ip += headerSize;
+                headerSize = ZSTDv05_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
+                if (ZSTDv05_isError(headerSize)) return headerSize;
+                if (headerSize) {
+                    /* not enough input to decode header : tell how many bytes would be necessary */
+                    *maxDstSizePtr = 0;
+                    return headerSize - zbc->hPos;
+                }
+                // zbc->stage = ZBUFFv05ds_decodeHeader; break;   /* useless : stage follows */
+            }
+        /* fall-through */
+        case ZBUFFv05ds_decodeHeader:
+                /* apply header to create / resize buffers */
+                {
+                    size_t neededOutSize = (size_t)1 << zbc->params.windowLog;
+                    size_t neededInSize = BLOCKSIZE;   /* a block is never > BLOCKSIZE */
+                    if (zbc->inBuffSize < neededInSize) {
+                        free(zbc->inBuff);
+                        zbc->inBuffSize = neededInSize;
+                        zbc->inBuff = (char*)malloc(neededInSize);
+                        if (zbc->inBuff == NULL) return ERROR(memory_allocation);
+                    }
+                    if (zbc->outBuffSize < neededOutSize) {
+                        free(zbc->outBuff);
+                        zbc->outBuffSize = neededOutSize;
+                        zbc->outBuff = (char*)malloc(neededOutSize);
+                        if (zbc->outBuff == NULL) return ERROR(memory_allocation);
+                }   }
+                if (zbc->hPos) {
+                    /* some data already loaded into headerBuffer : transfer into inBuff */
+                    memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
+                    zbc->inPos = zbc->hPos;
+                    zbc->hPos = 0;
+                    zbc->stage = ZBUFFv05ds_load;
+                    break;
+                }
+                zbc->stage = ZBUFFv05ds_read;
+        /* fall-through */
+        case ZBUFFv05ds_read:
+            {
+                size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
+                if (neededInSize==0) {  /* end of frame */
+                    zbc->stage = ZBUFFv05ds_init;
+                    notDone = 0;
+                    break;
+                }
+                if ((size_t)(iend-ip) >= neededInSize) {
+                    /* directly decode from src */
+                    size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
+                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+                        ip, neededInSize);
+                    if (ZSTDv05_isError(decodedSize)) return decodedSize;
+                    ip += neededInSize;
+                    if (!decodedSize) break;   /* this was just a header */
+                    zbc->outEnd = zbc->outStart + decodedSize;
+                    zbc->stage = ZBUFFv05ds_flush;
+                    break;
+                }
+                if (ip==iend) { notDone = 0; break; }   /* no more input */
+                zbc->stage = ZBUFFv05ds_load;
+            }
+            /* fall-through */
+        case ZBUFFv05ds_load:
+            {
+                size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
+                size_t toLoad = neededInSize - zbc->inPos;   /* should always be <= remaining space within inBuff */
+                size_t loadedSize;
+                if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected);   /* should never happen */
+                loadedSize = ZBUFFv05_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
+                ip += loadedSize;
+                zbc->inPos += loadedSize;
+                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
+                {
+                    size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
+                        zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
+                        zbc->inBuff, neededInSize);
+                    if (ZSTDv05_isError(decodedSize)) return decodedSize;
+                    zbc->inPos = 0;   /* input is consumed */
+                    if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; }   /* this was just a header */
+                    zbc->outEnd = zbc->outStart + decodedSize;
+                    zbc->stage = ZBUFFv05ds_flush;
+                    /* break;  (ZBUFFv05ds_flush follows) */
+                }
+            }
+            /* fall-through */
+        case ZBUFFv05ds_flush:
+            {
+                size_t toFlushSize = zbc->outEnd - zbc->outStart;
+                size_t flushedSize = ZBUFFv05_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
+                op += flushedSize;
+                zbc->outStart += flushedSize;
+                if (flushedSize == toFlushSize) {
+                    zbc->stage = ZBUFFv05ds_read;
+                    if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
+                        zbc->outStart = zbc->outEnd = 0;
+                    break;
+                }
+                /* cannot flush everything */
+                notDone = 0;
+                break;
+            }
+        default: return ERROR(GENERIC);   /* impossible */
+    }   }
+
+    *srcSizePtr = ip-istart;
+    *maxDstSizePtr = op-ostart;
+
+    {   size_t nextSrcSizeHint = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
+        if (nextSrcSizeHint > ZBUFFv05_blockHeaderSize) nextSrcSizeHint += ZBUFFv05_blockHeaderSize;   /* get next block header too */
+        nextSrcSizeHint -= zbc->inPos;   /* already loaded */
+        return nextSrcSizeHint;
+    }
+}
+
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+unsigned ZBUFFv05_isError(size_t errorCode) { return ERR_isError(errorCode); }
+const char* ZBUFFv05_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+size_t ZBUFFv05_recommendedDInSize(void)  { return BLOCKSIZE + ZBUFFv05_blockHeaderSize /* block header size */; }
+size_t ZBUFFv05_recommendedDOutSize(void) { return BLOCKSIZE; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v05.h b/vendor/github.com/DataDog/zstd/zstd_v05.h
new file mode 100644
index 0000000..b68fd57
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v05.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDv05_H
+#define ZSTDv05_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stddef.h>   /* size_t */
+#include "mem.h"      /* U64, U32 */
+
+
+/* *************************************
+*  Simple functions
+***************************************/
+/*! ZSTDv05_decompress() :
+    `compressedSize` : must be the _exact_ size of the compressed blob, otherwise decompression will fail.
+    `dstCapacity` must be large enough, i.e. equal to or larger than originalSize.
+    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+              or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
+size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
+                     const void* src, size_t compressedSize);
+
+/**
+ZSTDv05_findFrameCompressedSize() : get the compressed size of a ZSTD frame, i.e. the length of source it occupies
+    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame
+             or an errorCode if it fails (which can be tested using ZSTDv05_isError())
+*/
+size_t ZSTDv05_findFrameCompressedSize(const void* src, size_t compressedSize);
+
+/* *************************************
+*  Helper functions
+***************************************/
+/* Error Management */
+unsigned    ZSTDv05_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
+const char* ZSTDv05_getErrorName(size_t code);     /*!< provides readable string for an error code */
+
+
+/* *************************************
+*  Explicit memory management
+***************************************/
+/** Decompression context */
+typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;
+ZSTDv05_DCtx* ZSTDv05_createDCtx(void);
+size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx);      /*!< @return : errorCode */
+
+/** ZSTDv05_decompressDCtx() :
+*   Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */
+size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-***********************
+*  Simple Dictionary API
+*************************/
+/*! ZSTDv05_decompress_usingDict() :
+*   Decompression using a pre-defined Dictionary content (see dictBuilder).
+*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
+*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */
+size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
+                                            void* dst, size_t dstCapacity,
+                                      const void* src, size_t srcSize,
+                                      const void* dict,size_t dictSize);
+
+/*-************************
+*  Advanced Streaming API
+***************************/
+typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;
+typedef struct {
+    U64 srcSize;
+    U32 windowLog;     /* the only useful information to retrieve */
+    U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;
+} ZSTDv05_parameters;
+size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);
+
+size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
+void   ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);
+size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
+size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-***********************
+*  ZBUFF API
+*************************/
+typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;
+ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);
+size_t         ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);
+
+size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);
+size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);
+
+size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,
+                                            void* dst, size_t* dstCapacityPtr,
+                                      const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+*  Streaming decompression
+*
+*  A ZBUFFv05_DCtx object is required to track streaming operations.
+*  Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
+*  Use ZBUFFv05_decompressInit() to start a new decompression operation,
+*   or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.
+*  Note that ZBUFFv05_DCtx objects can be reused multiple times.
+*
+*  Use ZBUFFv05_decompressContinue() repetitively to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst.
+*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency)
+*            or 0 when a frame is completely decoded
+*            or an error code, which can be tested using ZBUFFv05_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
+*  output : ZBUFFv05_recommendedDOutSize == 128 KB. A block is the internal unit; this size guarantees a fully decoded block can always be written.
+*  input  : ZBUFFv05_recommendedDInSize == 128 KB + 3; just follow the indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
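+
+/* The loop below is an illustrative sketch of the streaming usage described above
+*  (kept inside a comment, for documentation purposes only). `readInput()` and
+*  `writeOutput()` are hypothetical caller-side helpers, and re-presenting unconsumed
+*  input (see note above) is omitted for brevity :
+*
+*      ZBUFFv05_DCtx* const dctx = ZBUFFv05_createDCtx();
+*      char inBuff[128*1024 + 3];  char outBuff[128*1024];
+*      ZBUFFv05_decompressInit(dctx);
+*      for (;;) {
+*          size_t srcSize = readInput(inBuff, sizeof(inBuff));
+*          size_t dstSize = sizeof(outBuff);
+*          size_t const hint = ZBUFFv05_decompressContinue(dctx, outBuff, &dstSize, inBuff, &srcSize);
+*          if (ZBUFFv05_isError(hint)) break;           (handle error)
+*          writeOutput(outBuff, dstSize);               (dstSize bytes were regenerated)
+*          if (hint == 0) break;                        (frame completely decoded)
+*      }
+*      ZBUFFv05_freeDCtx(dctx);
+*/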
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+unsigned ZBUFFv05_isError(size_t errorCode);
+const char* ZBUFFv05_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+*   These sizes are just hints, and tend to offer better latency */
+size_t ZBUFFv05_recommendedDInSize(void);
+size_t ZBUFFv05_recommendedDOutSize(void);
+
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define ZSTDv05_MAGICNUMBER 0xFD2FB525   /* v0.5 */
+
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTDv05_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.c b/vendor/github.com/DataDog/zstd/zstd_v06.c
new file mode 100644
index 0000000..8b068b3
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v06.c
@@ -0,0 +1,4124 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include "zstd_v06.h"
+#include <stddef.h>    /* size_t, ptrdiff_t */
+#include <string.h>    /* memcpy */
+#include <stdlib.h>    /* malloc, free, qsort */
+#include "error_private.h"
+
+
+
+/* ******************************************************************
+   mem.h
+   low-level memory access routines
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+#  define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/*-**************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violates the C standard.
+ *            It can generate buggy code on targets depending on alignment.
+ *            In some circumstances, it's the only known way to get the best performance (i.e. GCC + ARMv6)
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
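+/* For example (illustrative only) : to force the portable memcpy()-based method
+ * regardless of target detection, compile with -DMEM_FORCE_MEMORY_ACCESS=0 ;
+ * the auto-detection block below is then skipped entirely. */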
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates the C standard by lying about structure alignment.
+Only use if there is no other choice to achieve best performance on the target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_ulong(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+    return __builtin_bswap32(in);
+#else
+    return  ((in << 24) & 0xff000000 ) |
+            ((in <<  8) & 0x00ff0000 ) |
+            ((in >>  8) & 0x0000ff00 ) |
+            ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_uint64(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+    return __builtin_bswap64(in);
+#else
+    return  ((in << 56) & 0xff00000000000000ULL) |
+            ((in << 40) & 0x00ff000000000000ULL) |
+            ((in << 24) & 0x0000ff0000000000ULL) |
+            ((in << 8)  & 0x000000ff00000000ULL) |
+            ((in >> 8)  & 0x00000000ff000000ULL) |
+            ((in >> 24) & 0x0000000000ff0000ULL) |
+            ((in >> 40) & 0x000000000000ff00ULL) |
+            ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian()) {
+        MEM_write16(memPtr, val);
+    } else {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+        return MEM_swap32(MEM_read32(memPtr));
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+        return MEM_swap64(MEM_read64(memPtr));
+}
+
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+
+/*
+    zstd - standard compression library
+    Header File for static linking only
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net
+*/
+#ifndef ZSTDv06_STATIC_H
+#define ZSTDv06_STATIC_H
+
+/* The prototypes defined within this file are considered experimental.
+ * They should not be used in the context of a DLL, as they may change in the future.
+ * Prefer static linking if you need them, to guard against breaking changes between versions.
+ */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*- Advanced Decompression functions -*/
+
+/*! ZSTDv06_decompress_usingPreparedDCtx() :
+*   Same as ZSTDv06_decompress_usingDict(), but using a reference context `preparedDCtx`, where the dictionary has been loaded.
+*   It avoids reloading the dictionary each time.
+*   `preparedDCtx` must have been properly initialized using ZSTDv06_decompressBegin_usingDict().
+*   Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
+ZSTDLIBv06_API size_t ZSTDv06_decompress_usingPreparedDCtx(
+                                           ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize);
+
+
+
+#define ZSTDv06_FRAMEHEADERSIZE_MAX 13    /* for static allocation */
+static const size_t ZSTDv06_frameHeaderSize_min = 5;
+static const size_t ZSTDv06_frameHeaderSize_max = ZSTDv06_FRAMEHEADERSIZE_MAX;
+
+ZSTDLIBv06_API size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx);
+
+/*
+  Streaming decompression, direct mode (bufferless)
+
+  A ZSTDv06_DCtx object is required to track streaming operations.
+  Use ZSTDv06_createDCtx() / ZSTDv06_freeDCtx() to manage it.
+  A ZSTDv06_DCtx object can be re-used multiple times.
+
+  First optional operation is to retrieve frame parameters, using ZSTDv06_getFrameParams(), which doesn't consume the input.
+  It can provide the minimum size of rolling buffer required to properly decompress data,
+  and optionally the final size of uncompressed content.
+  (Note : content size is optional info that may not be present. 0 means : content size unknown)
+  Frame parameters are extracted from the beginning of the compressed frame.
+  The amount of data to read is variable, from ZSTDv06_frameHeaderSize_min to ZSTDv06_frameHeaderSize_max (so if `srcSize` >= ZSTDv06_frameHeaderSize_max, it will always work)
+  If `srcSize` is too small for the operation to succeed, the function will return the minimum size it requires to produce a result.
+  Result : 0 when successful, meaning the ZSTDv06_frameParams structure has been filled.
+          >0 : means there is not enough data in `src`. Provides the expected size to successfully decode the header.
+           or an errorCode, which can be tested using ZSTDv06_isError()
+
+  Start decompression, with ZSTDv06_decompressBegin() or ZSTDv06_decompressBegin_usingDict().
+  Alternatively, you can copy a prepared context, using ZSTDv06_copyDCtx().
+
+  Then use ZSTDv06_nextSrcSizeToDecompress() and ZSTDv06_decompressContinue() alternately.
+  ZSTDv06_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv06_decompressContinue().
+  ZSTDv06_decompressContinue() requires this exact amount of bytes, or it will fail.
+  ZSTDv06_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
+  They should preferably be located contiguously, prior to the current block. Alternatively, a round buffer is also possible.
+
+  @result of ZSTDv06_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity)
+  It can be zero, which is not an error; it just means ZSTDv06_decompressContinue() has decoded some header.
+
+  A frame is fully decoded when ZSTDv06_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+*/
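+
+/* Condensed sketch of the bufferless loop described above (documentation only;
+*  `in`, `out` and `outCapacity` are hypothetical caller-managed variables, and
+*  error handling is abbreviated) :
+*
+*      ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
+*      ZSTDv06_decompressBegin(dctx);
+*      for (;;) {
+*          size_t const toRead = ZSTDv06_nextSrcSizeToDecompress(dctx);
+*          if (toRead == 0) break;                      (frame fully decoded)
+*          {   size_t const decoded = ZSTDv06_decompressContinue(dctx, out, outCapacity, in, toRead);
+*              if (ZSTDv06_isError(decoded)) break;     (handle error)
+*              in += toRead; out += decoded; outCapacity -= decoded;
+*      }   }
+*      ZSTDv06_freeDCtx(dctx);
+*/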
+
+
+/* **************************************
+*  Block functions
+****************************************/
+/*! Block functions produce and decode raw zstd blocks, without frame metadata.
+    The user must keep track of the information required to regenerate data, such as compressed and content sizes.
+
+    A few rules to respect :
+    - Uncompressed block size must be <= ZSTDv06_BLOCKSIZE_MAX (128 KB)
+    - Compressing or decompressing requires a context structure
+      + Use ZSTDv06_createCCtx() and ZSTDv06_createDCtx()
+    - It is necessary to initialize the context before starting
+      + compression : ZSTDv06_compressBegin()
+      + decompression : ZSTDv06_decompressBegin()
+      + variants _usingDict() are also allowed
+      + copyCCtx() and copyDCtx() work too
+    - When a block is considered not compressible enough, ZSTDv06_compressBlock() result will be zero,
+      in which case nothing is produced into `dst`.
+      + The user must test for such an outcome and deal directly with uncompressed data
+      + ZSTDv06_decompressBlock() doesn't accept uncompressed data as input !!
+      + A decode-side usage sketch follows the declarations below.
+*/
+
+#define ZSTDv06_BLOCKSIZE_MAX (128 * 1024)   /* define, for static allocation */
+ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
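+
+/* Decode-side sketch (documentation only) : because ZSTDv06_decompressBlock() rejects
+*  uncompressed input, the caller must route each block according to its own metadata;
+*  `blockWasCompressed`, `src` and `srcSize` are hypothetical caller-provided values :
+*
+*      ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
+*      ZSTDv06_decompressBegin(dctx);
+*      if (blockWasCompressed) {
+*          size_t const regenerated = ZSTDv06_decompressBlock(dctx, dst, dstCapacity, src, srcSize);
+*          if (ZSTDv06_isError(regenerated)) ...        (handle error)
+*      } else {
+*          memcpy(dst, src, srcSize);                   (raw block : copy as-is)
+*      }
+*/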
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTDv06_STATIC_H */
+/*
+    zstd_internal - common functions to include
+    Header File for include
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : https://www.zstd.net
+*/
+#ifndef ZSTDv06_CCOMMON_H_MODULE
+#define ZSTDv06_CCOMMON_H_MODULE
+
+
+/*-*************************************
+*  Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/*-*************************************
+*  Common constants
+***************************************/
+#define ZSTDv06_DICT_MAGIC  0xEC30A436
+
+#define ZSTDv06_REP_NUM    3
+#define ZSTDv06_REP_INIT   ZSTDv06_REP_NUM
+#define ZSTDv06_REP_MOVE   (ZSTDv06_REP_NUM-1)
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define ZSTDv06_WINDOWLOG_ABSOLUTEMIN 12
+static const size_t ZSTDv06_fcs_fieldSize[4] = { 0, 1, 2, 8 };
+
+#define ZSTDv06_BLOCKHEADERSIZE 3   /* because the C standard does not allow a static const value to be defined using another static const value ... :( */
+static const size_t ZSTDv06_blockHeaderSize = ZSTDv06_BLOCKHEADERSIZE;
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
+
+#define HufLog 12
+
+#define IS_HUF 0
+#define IS_PCH 1
+#define IS_RAW 2
+#define IS_RLE 3
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+#define EQUAL_READ32 4
+#define REPCODE_STARTVALUE 1
+
+#define Litbits  8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML  52
+#define MaxLL  35
+#define MaxOff 28
+#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog    9
+#define LLFSELog    9
+#define OffFSELog   8
+
+#define FSEv06_ENCODING_RAW     0
+#define FSEv06_ENCODING_RLE     1
+#define FSEv06_ENCODING_STATIC  2
+#define FSEv06_ENCODING_DYNAMIC 3
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
+                                     13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+                                            -1,-1,-1,-1 };
+static const U32 LL_defaultNormLog = 6;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
+                                     12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
+                                            -1,-1,-1,-1,-1 };
+static const U32 ML_defaultNormLog = 6;
+
+static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
+static const U32 OF_defaultNormLog = 5;
+
+
+/*-*******************************************
+*  Shared functions to include for inlining
+*********************************************/
+static void ZSTDv06_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTDv06_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTDv06_wildcopy() :
+*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
+
+
+
+/*-*******************************************
+*  Private interfaces
+*********************************************/
+typedef struct {
+    U32 off;
+    U32 len;
+} ZSTDv06_match_t;
+
+typedef struct {
+    U32 price;
+    U32 off;
+    U32 mlen;
+    U32 litlen;
+    U32 rep[ZSTDv06_REP_INIT];
+} ZSTDv06_optimal_t;
+
+typedef struct { U32  unused; } ZSTDv06_stats_t;
+
+typedef struct {
+    void* buffer;
+    U32*  offsetStart;
+    U32*  offset;
+    BYTE* offCodeStart;
+    BYTE* litStart;
+    BYTE* lit;
+    U16*  litLengthStart;
+    U16*  litLength;
+    BYTE* llCodeStart;
+    U16*  matchLengthStart;
+    U16*  matchLength;
+    BYTE* mlCodeStart;
+    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+    U32   longLengthPos;
+    /* opt */
+    ZSTDv06_optimal_t* priceTable;
+    ZSTDv06_match_t* matchTable;
+    U32* matchLengthFreq;
+    U32* litLengthFreq;
+    U32* litFreq;
+    U32* offCodeFreq;
+    U32  matchLengthSum;
+    U32  matchSum;
+    U32  litLengthSum;
+    U32  litSum;
+    U32  offCodeSum;
+    U32  log2matchLengthSum;
+    U32  log2matchSum;
+    U32  log2litLengthSum;
+    U32  log2litSum;
+    U32  log2offCodeSum;
+    U32  factor;
+    U32  cachedPrice;
+    U32  cachedLitLength;
+    const BYTE* cachedLiterals;
+    ZSTDv06_stats_t stats;
+} seqStore_t;
+
+void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
+
+
+#endif   /* ZSTDv06_CCOMMON_H_MODULE */
+/* ******************************************************************
+   FSE : Finite State Entropy codec
+   Public Prototypes declaration
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef FSEv06_H
+#define FSEv06_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*-****************************************
+*  FSE simple functions
+******************************************/
+/*! FSEv06_decompress():
+    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstCapacity'.
+    @return : size of regenerated data (<= dstCapacity),
+              or an error code, which can be tested using FSEv06_isError().
+
+    ** Important ** : FSEv06_decompress() decompresses neither non-compressible nor RLE data !!!
+    Why ? : making this distinction requires a header.
+    Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+size_t FSEv06_decompress(void* dst,  size_t dstCapacity,
+                const void* cSrc, size_t cSrcSize);
+
+
+/*-*****************************************
+*  Tool functions
+******************************************/
+size_t FSEv06_compressBound(size_t size);       /* maximum compressed size */
+
+/* Error Management */
+unsigned    FSEv06_isError(size_t code);        /* tells if a return value is an error code */
+const char* FSEv06_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
+
+
+
+/*-*****************************************
+*  FSE detailed API
+******************************************/
+/*!
+
+FSEv06_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using an external method.
+*/
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSEv06_readNCount():
+    Read compactly saved 'normalizedCounter' from 'rBuffer'.
+    @return : size read from 'rBuffer',
+              or an errorCode, which can be tested using FSEv06_isError().
+              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSEv06_DTable.
+    Note that its size depends on 'tableLog' */
+typedef unsigned FSEv06_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+FSEv06_DTable* FSEv06_createDTable(unsigned tableLog);
+void        FSEv06_freeDTable(FSEv06_DTable* dt);
+
+/*! FSEv06_buildDTable():
+    Builds 'dt', which must be already allocated, using FSEv06_createDTable().
+    return : 0, or an errorCode, which can be tested using FSEv06_isError() */
+size_t FSEv06_buildDTable (FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSEv06_decompress_usingDTable():
+    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+    into `dst` which must be already allocated.
+    @return : size of regenerated data (necessarily <= `dstCapacity`),
+              or an errorCode, which can be tested using FSEv06_isError() */
+size_t FSEv06_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv06_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If the block is uncompressed, use memcpy() instead.
+ If the block is a single repeated byte, use memset() instead.)
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSEv06_readNCount() if it was saved using FSEv06_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSEv06_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSEv06_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
+
+The next step is to build the decompression tables 'FSEv06_DTable' from 'normalizedCounter'.
+This is performed by the function FSEv06_buildDTable().
+The space required by 'FSEv06_DTable' must be already allocated using FSEv06_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
+
+`FSEv06_DTable` can then be used to decompress `cSrc`, with FSEv06_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSEv06_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSEv06_isError(). (ex: dst buffer too small)
+*/
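+
+/* Condensed sketch of the tutorial above (documentation only; buffer sizes are
+*  illustrative, and error handling is abbreviated) :
+*
+*      short norm[256]; unsigned maxSymbolValue = 255; unsigned tableLog;
+*      size_t const hSize = FSEv06_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
+*      if (FSEv06_isError(hSize)) ...                   (handle error)
+*      {   FSEv06_DTable* const dt = FSEv06_createDTable(tableLog);
+*          FSEv06_buildDTable(dt, norm, maxSymbolValue, tableLog);
+*          {   size_t const dSize = FSEv06_decompress_usingDTable(dst, dstCapacity,
+*                                       (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+*              if (FSEv06_isError(dSize)) ...           (handle error : ex. dst buffer too small)
+*          }
+*          FSEv06_freeDTable(dt);
+*      }
+*/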
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSEv06_H */
+/* ******************************************************************
+   bitstream
+   Part of FSE library
+   header file (to include)
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which must be inlined for best performance.
+*  Since link-time optimization is not available on all compilers,
+*  these functions are defined in a .h file, to be included.
+*/
+
+
+/*=========================================
+*  Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+#  include <immintrin.h>   /* support for bextr (experimental) */
+#endif
+
+
+
+/*-********************************************
+*  bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BITv06_DStream_t;
+
+typedef enum { BITv06_DStream_unfinished = 0,
+               BITv06_DStream_endOfBuffer = 1,
+               BITv06_DStream_completed = 2,
+               BITv06_DStream_overflow = 3 } BITv06_DStream_status;  /* result of BITv06_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BITv06_readBits(BITv06_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD);
+MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* bitD);
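+
+/* Typical decoding sketch (documentation only) : bits are read in reverse order,
+*  starting from the end of the bitStream; `nbBits` is a hypothetical per-symbol value :
+*
+*      BITv06_DStream_t bitD;
+*      size_t const initResult = BITv06_initDStream(&bitD, srcBuffer, srcSize);
+*      ...                                              (test initResult for errors)
+*      do {
+*          size_t const bits = BITv06_readBits(&bitD, nbBits);
+*          ...                                          (consume `bits`)
+*      } while (BITv06_reloadDStream(&bitD) == BITv06_DStream_unfinished);
+*      if (!BITv06_endOfDStream(&bitD)) ...             (bitStream not fully consumed)
+*/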
+
+
+
+/*-****************************************
+*  unsafe API
+******************************************/
+MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+*  Internal functions
+****************************************************************/
+MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    unsigned r;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+    return r;
+#   endif
+}
+
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BITv06_initDStream() :
+*   Initialize a BITv06_DStream_t.
+*   `bitD` : a pointer to an already allocated BITv06_DStream_t structure.
+*   `srcSize` must be the *exact* size of the bitStream, in bytes.
+*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          if (lastByte == 0) return ERROR(GENERIC);   /* endMark not present */
+          bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
+    } else {
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; /* fall-through */
+            default: break;
+        }
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          if (lastByte == 0) return ERROR(GENERIC);   /* endMark not present */
+          bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
+        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+
+/*! BITv06_lookBits() :
+*   Provides next n bits from the local register; the register is not modified. */
+MEM_STATIC size_t BITv06_lookBits(const BITv06_DStream_t* bitD, U32 nbBits)
+{
+    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BITv06_lookBitsFast() :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits)
+{
+    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BITv06_skipBits(BITv06_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BITv06_lookBits(bitD, nbBits);
+    BITv06_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*! BITv06_readBitsFast() :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BITv06_lookBitsFast(bitD, nbBits);
+    BITv06_skipBits(bitD, nbBits);
+    return value;
+}
+
+MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
+        return BITv06_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BITv06_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start) {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv06_DStream_endOfBuffer;
+        return BITv06_DStream_completed;
+    }
+    {   U32 nbBytes = bitD->bitsConsumed >> 3;
+        BITv06_DStream_status result = BITv06_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start) {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BITv06_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BITv06_endOfDStream() :
+*   @return Tells if DStream has exactly reached its end (all bits consumed).
+*/
+MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+   FSE : Finite State Entropy coder
+   header file for static linking (only)
+   Copyright (C) 2013-2015, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+   - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef FSEv06_STATIC_H
+#define FSEv06_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSEv06_NCOUNTBOUND 512
+#define FSEv06_BLOCKBOUND(size) (size + (size>>7))
+#define FSEv06_COMPRESSBOUND(size) (FSEv06_NCOUNTBOUND + FSEv06_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
+#define FSEv06_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
+
+
+/* *****************************************
+*  FSE advanced API
+*******************************************/
+size_t FSEv06_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+/* same as FSEv06_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr  */
+
+size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits);
+/* build a fake FSEv06_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, unsigned char symbolValue);
+/* build a fake FSEv06_DTable, designed to always generate the same symbolValue */
+
+
+/* *****************************************
+*  FSE symbol decompression API
+*******************************************/
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSEv06_DState_t;
+
+
+static void     FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt);
+
+static unsigned char FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
+
+
+/* *****************************************
+*  FSE unsafe API
+*******************************************/
+static unsigned char FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* *****************************************
+*  Implementation of inlined functions
+*******************************************/
+
+
+/* ======    Decompression    ====== */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSEv06_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSEv06_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSEv06_DTableHeader* const DTableH = (const FSEv06_DTableHeader*)ptr;
+    DStatePtr->state = BITv06_readBits(bitD, DTableH->tableLog);
+    BITv06_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSEv06_peekSymbol(const FSEv06_DState_t* DStatePtr)
+{
+    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    return DInfo.symbol;
+}
+
+MEM_STATIC void FSEv06_updateState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
+{
+    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    size_t const lowBits = BITv06_readBits(bitD, nbBits);
+    DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
+{
+    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BITv06_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+/*! FSEv06_decodeSymbolFast() :
+    unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
+{
+    FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BITv06_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+
+
+#ifndef FSEv06_COMMONDEFS_ONLY
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSEv06_MAX_MEMORY_USAGE 14
+#define FSEv06_DEFAULT_MEMORY_USAGE 13
+
+/*!FSEv06_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSEv06_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSEv06_FUNCTION_TYPE BYTE
+#define FSEv06_FUNCTION_EXTENSION
+#define FSEv06_DECODE_TYPE FSEv06_decode_t
+
+
+#endif   /* !FSEv06_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSEv06_MAX_TABLELOG  (FSEv06_MAX_MEMORY_USAGE-2)
+#define FSEv06_MAX_TABLESIZE (1U<<FSEv06_MAX_TABLELOG)
+#define FSEv06_MAXTABLESIZE_MASK (FSEv06_MAX_TABLESIZE-1)
+#define FSEv06_DEFAULT_TABLELOG (FSEv06_DEFAULT_MEMORY_USAGE-2)
+#define FSEv06_MIN_TABLELOG 5
+
+#define FSEv06_TABLELOG_ABSOLUTE_MAX 15
+#if FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX
+#error "FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSEv06_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
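+/* Editor's note : tableSize is a power of 2 and TABLESTEP is odd, so
+   repeatedly adding the step modulo tableSize visits every cell exactly once;
+   the symbol-spreading loop in FSEv06_buildDTable() below relies on this
+   full cycle. */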
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSEv06_STATIC_H */
+/*
+   Common functions of New Generation Entropy library
+   Copyright (C) 2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+
+/*-****************************************
+*  FSE Error Management
+******************************************/
+unsigned FSEv06_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSEv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+*  HUF Error Management
+****************************************************************/
+unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }
+
+const char* HUFv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static short FSEv06_abs(short a) { return a<0 ? -a : a; }
+
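+/* Editor's note : FSEv06_readNCount() parses the compact header written by
+   the encoder : the low 4 bits give tableLog (biased by FSEv06_MIN_TABLELOG),
+   then one normalized count per symbol (possibly 0 or -1) is read with a
+   variable number of bits until the (1<<tableLog) probability budget is
+   spent. It returns the number of header bytes consumed, or an error code. */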
+size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSEv06_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSEv06_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) && (charnum<=*maxSVPtr)) {
+        if (previous0) {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF) {
+                n0+=24;
+                if (ip < iend-5) {
+                    ip+=2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                } else {
+                    bitStream >>= 16;
+                    bitCount+=16;
+            }   }
+            while ((bitStream & 3) == 3) {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
+        {   short const max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            } else {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSEv06_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold) {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
+    if (remaining != 1) return ERROR(GENERIC);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+    return ip-istart;
+}
+/* ******************************************************************
+   FSE : Finite State Entropy decoder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSEv06_isError ERR_isError
+#define FSEv06_STATIC_ASSERT(c) { enum { FSEv06_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
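+/* Editor's note : when the asserted condition is false, !!(c) is 0 and the
+   enum initializer becomes a division by zero, forcing a compile-time error. */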
+
+
+/* **************************************************************
+*  Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG)];
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  This section is designed to be included,
+  for type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSEv06_FUNCTION_EXTENSION
+#  error "FSEv06_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSEv06_FUNCTION_TYPE
+#  error "FSEv06_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSEv06_CAT(X,Y) X##Y
+#define FSEv06_FUNCTION_NAME(X,Y) FSEv06_CAT(X,Y)
+#define FSEv06_TYPE_NAME(X,Y) FSEv06_CAT(X,Y)
+
+
+/* Function templates */
+FSEv06_DTable* FSEv06_createDTable (unsigned tableLog)
+{
+    if (tableLog > FSEv06_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv06_TABLELOG_ABSOLUTE_MAX;
+    return (FSEv06_DTable*)malloc( FSEv06_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSEv06_freeDTable (FSEv06_DTable* dt)
+{
+    free(dt);
+}
+
+size_t FSEv06_buildDTable(FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* const tdPtr = dt+1;   /* skip the 32-bit header : *dt is unsigned, so dt+1 remains 32-bit aligned */
+    FSEv06_DECODE_TYPE* const tableDecode = (FSEv06_DECODE_TYPE*) (tdPtr);
+    U16 symbolNext[FSEv06_MAX_SYMBOL_VALUE+1];
+
+    U32 const maxSV1 = maxSymbolValue + 1;
+    U32 const tableSize = 1 << tableLog;
+    U32 highThreshold = tableSize-1;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSEv06_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSEv06_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    {   FSEv06_DTableHeader DTableH;
+        DTableH.tableLog = (U16)tableLog;
+        DTableH.fastMode = 1;
+        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
+            U32 s;
+            for (s=0; s<maxSV1; s++) {
+                if (normalizedCounter[s]==-1) {
+                    tableDecode[highThreshold--].symbol = (FSEv06_FUNCTION_TYPE)s;
+                    symbolNext[s] = 1;
+                } else {
+                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+                    symbolNext[s] = normalizedCounter[s];
+        }   }   }
+        memcpy(dt, &DTableH, sizeof(DTableH));
+    }
+
+    /* Spread symbols */
+    {   U32 const tableMask = tableSize-1;
+        U32 const step = FSEv06_TABLESTEP(tableSize);
+        U32 s, position = 0;
+        for (s=0; s<maxSV1; s++) {
+            int i;
+            for (i=0; i<normalizedCounter[s]; i++) {
+                tableDecode[position].symbol = (FSEv06_FUNCTION_TYPE)s;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }   }
+
+        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+    }
+
+    /* Build Decoding table */
+    {   U32 u;
+        for (u=0; u<tableSize; u++) {
+            FSEv06_FUNCTION_TYPE const symbol = (FSEv06_FUNCTION_TYPE)(tableDecode[u].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[u].nbBits = (BYTE) (tableLog - BITv06_highbit32 ((U32)nextState) );
+            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+    }   }
+
+    return 0;
+}
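+
+/* Editor's sketch (not from upstream zstd) : the full decode lifecycle,
+   mirroring FSEv06_decompress() further below. Error checks are omitted and
+   `cSrc`, `cSrcSize`, `dst`, `dstSize` are assumed caller-provided.
+
+       short counts[FSEv06_MAX_SYMBOL_VALUE+1];
+       unsigned maxSV = FSEv06_MAX_SYMBOL_VALUE, tableLog;
+       size_t const hSize = FSEv06_readNCount(counts, &maxSV, &tableLog, cSrc, cSrcSize);
+       FSEv06_DTable* const dt = FSEv06_createDTable(tableLog);
+       FSEv06_buildDTable(dt, counts, maxSV, tableLog);
+       FSEv06_decompress_usingDTable(dst, dstSize, (const BYTE*)cSrc + hSize, cSrcSize - hSize, dt);
+       FSEv06_freeDTable(dt);
+*/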
+
+
+
+#ifndef FSEv06_COMMONDEFS_ONLY
+
+/*-*******************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSEv06_decode_t* const cell = (FSEv06_decode_t*)dPtr;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSEv06_decode_t* const dinfo = (FSEv06_decode_t*)dPtr;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSV1 = tableMask+1;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<maxSV1; s++) {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE size_t FSEv06_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSEv06_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BITv06_DStream_t bitD;
+    FSEv06_DState_t state1;
+    FSEv06_DState_t state2;
+
+    /* Init */
+    { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
+      if (FSEv06_isError(errorCode)) return errorCode; }
+
+    FSEv06_initDState(&state1, &bitD, dt);
+    FSEv06_initDState(&state2, &bitD, dt);
+
+#define FSEv06_GETSYMBOL(statePtr) fast ? FSEv06_decodeSymbolFast(statePtr, &bitD) : FSEv06_decodeSymbol(statePtr, &bitD)
+
+    /* 4 symbols per loop */
+    for ( ; (BITv06_reloadDStream(&bitD)==BITv06_DStream_unfinished) && (op<olimit) ; op+=4) {
+        op[0] = FSEv06_GETSYMBOL(&state1);
+
+        if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BITv06_reloadDStream(&bitD);
+
+        op[1] = FSEv06_GETSYMBOL(&state2);
+
+        if (FSEv06_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BITv06_reloadDStream(&bitD) > BITv06_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSEv06_GETSYMBOL(&state1);
+
+        if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BITv06_reloadDStream(&bitD);
+
+        op[3] = FSEv06_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : at this point, BITv06_reloadDStream(&bitD) >= BITv06_DStream_endOfBuffer; the loop ends at exactly BITv06_DStream_completed */
+    while (1) {
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+        *op++ = FSEv06_GETSYMBOL(&state1);
+
+        if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
+            *op++ = FSEv06_GETSYMBOL(&state2);
+            break;
+        }
+
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+        *op++ = FSEv06_GETSYMBOL(&state2);
+
+        if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
+            *op++ = FSEv06_GETSYMBOL(&state1);
+            break;
+    }   }
+
+    return op-ostart;
+}
+
+
+size_t FSEv06_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSEv06_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSEv06_DTableHeader* DTableH = (const FSEv06_DTableHeader*)ptr;
+    const U32 fastMode = DTableH->fastMode;
+
+    /* select fast mode (static) */
+    if (fastMode) return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSEv06_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE;
+
+    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
+
+    /* normal FSE decoding mode */
+    {   size_t const NCountLength = FSEv06_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+        if (FSEv06_isError(NCountLength)) return NCountLength;
+        if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
+        ip += NCountLength;
+        cSrcSize -= NCountLength;
+    }
+
+    { size_t const errorCode = FSEv06_buildDTable (dt, counting, maxSymbolValue, tableLog);
+      if (FSEv06_isError(errorCode)) return errorCode; }
+
+    return FSEv06_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);   /* always return, even if it is an error code */
+}
+
+
+
+#endif   /* FSEv06_COMMONDEFS_ONLY */
+/* ******************************************************************
+   Huffman coder, part of New Generation Entropy library
+   header file
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFv06_H
+#define HUFv06_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+*  HUF simple functions
+******************************************/
+size_t HUFv06_decompress(void* dst,  size_t dstSize,
+                const void* cSrc, size_t cSrcSize);
+/*
+HUFv06_decompress() :
+    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstSize'.
+    `dstSize` : must be the **exact** size of original (uncompressed) data.
+    Note : in contrast with FSE, HUFv06_decompress can regenerate
+           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+           because it knows the size to regenerate.
+    @return : size of regenerated data (== dstSize)
+              or an error code, which can be tested using HUFv06_isError()
+*/
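+
+/* Editor's sketch (not from upstream zstd) : minimal call sequence for the
+   simple API above. `dst`, `originalSize`, `cSrc`, `cSrcSize` are assumed
+   caller-provided, and `originalSize` must be the exact decompressed size.
+
+       size_t const r = HUFv06_decompress(dst, originalSize, cSrc, cSrcSize);
+       if (HUFv06_isError(r)) return r;   // on success, r == originalSize
+*/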
+
+
+/* ****************************************
+*  Tool functions
+******************************************/
+size_t HUFv06_compressBound(size_t size);       /**< maximum compressed size */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* HUFv06_H */
+/* ******************************************************************
+   Huffman codec, part of New Generation Entropy library
+   header file, for static linking only
+   Copyright (C) 2013-2016, Yann Collet
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFv06_STATIC_H
+#define HUFv06_STATIC_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+*  Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUFv06_CTABLEBOUND 129
+#define HUFv06_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible data is pre-filtered with a fast heuristic */
+#define HUFv06_COMPRESSBOUND(size) (HUFv06_CTABLEBOUND + HUFv06_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
+
+/* static allocation of HUF's DTable */
+#define HUFv06_DTABLE_SIZE(maxTableLog)   (1 + (1<<maxTableLog))
+#define HUFv06_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        unsigned short DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv06_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
+#define HUFv06_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
+        unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
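+
+/* Editor's note : each macro above declares an array whose first cell is
+   initialized to maxTableLog; the readDTable functions validate the parsed
+   tableLog against that capacity, and the decoders read the table's log back
+   as DTable[0]. For example, HUFv06_decompress4X2() below declares its table
+   with HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG). */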
+
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbols decoder */
+
+
+
+/*!
+HUFv06_decompress() does the following:
+1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
+2. build the Huffman table from the saved representation, using HUFv06_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUFv06_decompressSXn_usingDTable
+*/
+size_t HUFv06_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
+size_t HUFv06_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
+
+size_t HUFv06_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv06_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
+
+
+/* single stream variants */
+size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+
+size_t HUFv06_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
+size_t HUFv06_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
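+
+/* Editor's sketch (not from upstream zstd) : when several blocks share one
+   Huffman table, the header can be parsed once with HUFv06_readDTableX2() and
+   the *_usingDTable variants called per block. `hdr`, `hdrSize`, `block`,
+   `blockSize` are illustrative names; error checks are omitted.
+
+       HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
+       size_t const hSize = HUFv06_readDTableX2(DTable, hdr, hdrSize);
+       // then, for each block encoded with that table :
+       HUFv06_decompress4X2_usingDTable(dst, dstSize, block, blockSize, DTable);
+*/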
+
+
+
+/* **************************************************************
+*  Constants
+****************************************************************/
+#define HUFv06_ABSOLUTEMAX_TABLELOG  16   /* absolute limit of HUFv06_MAX_TABLELOG. Beyond that value, the code does not work */
+#define HUFv06_MAX_TABLELOG  12           /* max configured tableLog (for static allocation); can be modified up to HUFv06_ABSOLUTEMAX_TABLELOG */
+#define HUFv06_DEFAULT_TABLELOG  HUFv06_MAX_TABLELOG   /* tableLog by default, when not specified */
+#define HUFv06_MAX_SYMBOL_VALUE 255
+#if (HUFv06_MAX_TABLELOG > HUFv06_ABSOLUTEMAX_TABLELOG)
+#  error "HUFv06_MAX_TABLELOG is too large !"
+#endif
+
+
+
+/*! HUFv06_readStats() :
+    Read compact Huffman tree, saved by HUFv06_writeCTable().
+    `huffWeight` is destination buffer.
+    @return : size read from `src`
+*/
+MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                            U32* nbSymbolsPtr, U32* tableLogPtr,
+                            const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
+
+    if (iSize >= 128)  { /* special header */
+        if (iSize >= (242)) {  /* RLE */
+            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, hwSize);
+            iSize = 0;
+        }
+        else {   /* Incompressible */
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+            if (oSize >= hwSize) return ERROR(corruption_detected);
+            ip += 1;
+            {   U32 n;
+                for (n=0; n<oSize; n+=2) {
+                    huffWeight[n]   = ip[n/2] >> 4;
+                    huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }   }
+    else  {   /* header compressed with FSE (normal case) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSEv06_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSEv06_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUFv06_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
+    weightTotal = 0;
+    {   U32 n; for (n=0; n<oSize; n++) {
+            if (huffWeight[n] >= HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+            rankStats[huffWeight[n]]++;
+            weightTotal += (1 << huffWeight[n]) >> 1;
+    }   }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    {   U32 const tableLog = BITv06_highbit32(weightTotal) + 1;
+        if (tableLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
+        *tableLogPtr = tableLog;
+        /* determine last weight */
+        {   U32 const total = 1 << tableLog;
+            U32 const rest = total - weightTotal;
+            U32 const verif = 1 << BITv06_highbit32(rest);
+            U32 const lastWeight = BITv06_highbit32(rest) + 1;
+            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+            huffWeight[oSize] = (BYTE)lastWeight;
+            rankStats[lastWeight]++;
+    }   }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    return iSize+1;
+}
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* HUFv06_STATIC_H */
+/* ******************************************************************
+   Huffman decoder, part of New Generation Entropy library
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUFv06_STATIC_ASSERT(c) { enum { HUFv06_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+
+/* *******************************************************
+*  HUF : Huffman block decompression
+*********************************************************/
+typedef struct { BYTE byte; BYTE nbBits; } HUFv06_DEltX2;   /* single-symbol decoding */
+
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv06_DEltX4;  /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
+
+
+
+/*-***************************/
+/*  single-symbol decoding   */
+/*-***************************/
+
+size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUFv06_MAX_SYMBOL_VALUE + 1];
+    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    size_t iSize;
+    U32 nbSymbols = 0;
+    U32 n;
+    U32 nextRankStart;
+    void* const dtPtr = DTable + 1;
+    HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr;
+
+    HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16));   /* if compilation fails here, assertion is false */
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUFv06_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge);   /* DTable is too small */
+    DTable[0] = (U16)tableLog;   /* note : it might be worth separating the allocated size of DTable from its used size, to allow re-use */
+
+    /* Prepare ranks */
+    nextRankStart = 0;
+    for (n=1; n<tableLog+1; n++) {
+        U32 current = nextRankStart;
+        nextRankStart += (rankVal[n] << (n-1));
+        rankVal[n] = current;
+    }
+
+    /* fill DTable */
+    for (n=0; n<nbSymbols; n++) {
+        const U32 w = huffWeight[n];
+        const U32 length = (1 << w) >> 1;
+        U32 i;
+        HUFv06_DEltX2 D;
+        D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+        for (i = rankVal[w]; i < rankVal[w] + length; i++)
+            dt[i] = D;
+        rankVal[w] += length;
+    }
+
+    return iSize;
+}
+
+
+static BYTE HUFv06_decodeSymbolX2(BITv06_DStream_t* Dstream, const HUFv06_DEltX2* dt, const U32 dtLog)
+{
+    const size_t val = BITv06_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+    const BYTE c = dt[val].byte;
+    BITv06_skipBits(Dstream, dt[val].nbBits);
+    return c;
+}
+
+#define HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUFv06_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUFv06_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
+        HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUFv06_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+static inline size_t HUFv06_decodeStreamX2(BYTE* p, BITv06_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv06_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-4)) {
+        HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUFv06_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd))
+        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+size_t HUFv06_decompress1X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + dstSize;
+    const U32 dtLog = DTable[0];
+    const void* dtPtr = DTable;
+    const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr)+1;
+    BITv06_DStream_t bitD;
+
+    { size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);
+      if (HUFv06_isError(errorCode)) return errorCode; }
+
+    HUFv06_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+    /* check */
+    if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    return dstSize;
+}
+
+size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUFv06_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUFv06_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+size_t HUFv06_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U16* DTable)
+{
+    /* Check */
+    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BITv06_DStream_t bitD1;
+        BITv06_DStream_t bitD2;
+        BITv06_DStream_t bitD3;
+        BITv06_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BITv06_initDStream(&bitD1, istart1, length1);
+        if (HUFv06_isError(errorCode)) return errorCode;
+        errorCode = BITv06_initDStream(&bitD2, istart2, length2);
+        if (HUFv06_isError(errorCode)) return errorCode;
+        errorCode = BITv06_initDStream(&bitD3, istart3, length3);
+        if (HUFv06_isError(errorCode)) return errorCode;
+        errorCode = BITv06_initDStream(&bitD4, istart4, length4);
+        if (HUFv06_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+        for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv06_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv06_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX2_0(op4, &bitD4);
+            endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUFv06_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv06_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv06_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv06_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
+    if (HUFv06_isError(errorCode)) return errorCode;
+    if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += errorCode;
+    cSrcSize -= errorCode;
+
+    return HUFv06_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
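+
+/* Editor's note : "X4" entries (HUFv06_DEltX4) pack up to two decoded bytes
+   per table lookup (the 16-bit `sequence`, with `length` 1 or 2), trading a
+   larger DTable for fewer bitstream reads than the single-symbol X2 path. */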
+
+static void HUFv06_fillDTableX4Level2(HUFv06_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUFv06_DEltX4 DElt;
+    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1) {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
+        const U32 symbol = sortedSymbols[s].symbol;
+        const U32 weight = sortedSymbols[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 length = 1 << (sizeLog-nbBits);
+        const U32 start = rankVal[weight];
+        U32 i = start;
+        const U32 end = start + length;
+
+        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+        DElt.nbBits = (BYTE)(nbBits + consumed);
+        DElt.length = 2;
+        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+        rankVal[weight] += length;
+    }}
+}
+
+typedef U32 rankVal_t[HUFv06_ABSOLUTEMAX_TABLELOG][HUFv06_ABSOLUTEMAX_TABLELOG + 1];
+
+static void HUFv06_fillDTableX4(HUFv06_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++) {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUFv06_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        } else {
+            HUFv06_DEltX4 DElt;
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits = (BYTE)(nbBits);
+            DElt.length = 1;
+            {   U32 u;
+                const U32 end = start + length;
+                for (u = start; u < end; u++) DTable[u] = DElt;
+        }   }
+        rankVal[weight] += length;
+    }
+}
+
+size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUFv06_MAX_SYMBOL_VALUE + 1];
+    sortedSymbol_t sortedSymbol[HUFv06_MAX_SYMBOL_VALUE + 1];
+    U32 rankStats[HUFv06_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
+    U32 rankStart0[HUFv06_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    const U32 memLog = DTable[0];
+    size_t iSize;
+    void* dtPtr = DTable;
+    HUFv06_DEltX4* const dt = ((HUFv06_DEltX4*)dtPtr) + 1;
+
+    HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32));   /* if compilation fails here, assertion is false */
+    if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUFv06_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > memLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
+
+    /* Get start index of each weight */
+    {   U32 w, nextRankStart = 0;
+        for (w=1; w<maxW+1; w++) {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of the sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {   U32 s;
+        for (s=0; s<nbSymbols; s++) {
+            U32 const w = weightList[s];
+            U32 const r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0w symbols; this is the beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {   U32* const rankVal0 = rankVal[0];
+        {   int const rescale = (memLog-tableLog) - 1;   /* tableLog <= memLog */
+            U32 nextRankVal = 0;
+            U32 w;
+            for (w=1; w<maxW+1; w++) {
+                U32 current = nextRankVal;
+                nextRankVal += rankStats[w] << (w+rescale);
+                rankVal0[w] = current;
+        }   }
+        {   U32 const minBits = tableLog+1 - maxW;
+            U32 consumed;
+            for (consumed = minBits; consumed < memLog - minBits + 1; consumed++) {
+                U32* const rankValPtr = rankVal[consumed];
+                U32 w;
+                for (w = 1; w < maxW+1; w++) {
+                    rankValPtr[w] = rankVal0[w] >> consumed;
+    }   }   }   }
+
+    HUFv06_fillDTableX4(dt, memLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    return iSize;
+}
+
+
+static U32 HUFv06_decodeSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BITv06_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BITv06_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+static U32 HUFv06_decodeLastSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BITv06_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BITv06_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BITv06_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+    }   }
+    return 1;
+}
+
+
+#define HUFv06_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv06_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
+        ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv06_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUFv06_decodeStreamX4(BYTE* p, BITv06_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv06_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd-7)) {
+        HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUFv06_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-2))
+        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUFv06_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+
+size_t HUFv06_decompress1X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    const BYTE* const istart = (const BYTE*) cSrc;
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* const oend = ostart + dstSize;
+
+    const U32 dtLog = DTable[0];
+    const void* const dtPtr = DTable;
+    const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
+
+    /* Init */
+    BITv06_DStream_t bitD;
+    { size_t const errorCode = BITv06_initDStream(&bitD, istart, cSrcSize);
+      if (HUFv06_isError(errorCode)) return errorCode; }
+
+    /* decode */
+    HUFv06_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);
+
+    /* check */
+    if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUFv06_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUFv06_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
+
+size_t HUFv06_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const U32* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable;
+        const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
+        const U32 dtLog = DTable[0];
+        size_t errorCode;
+
+        /* Init */
+        BITv06_DStream_t bitD1;
+        BITv06_DStream_t bitD2;
+        BITv06_DStream_t bitD3;
+        BITv06_DStream_t bitD4;
+        const size_t length1 = MEM_readLE16(istart);
+        const size_t length2 = MEM_readLE16(istart+2);
+        const size_t length3 = MEM_readLE16(istart+4);
+        size_t length4;
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+
+        length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        errorCode = BITv06_initDStream(&bitD1, istart1, length1);
+        if (HUFv06_isError(errorCode)) return errorCode;
+        errorCode = BITv06_initDStream(&bitD2, istart2, length2);
+        if (HUFv06_isError(errorCode)) return errorCode;
+        errorCode = BITv06_initDStream(&bitD3, istart3, length3);
+        if (HUFv06_isError(errorCode)) return errorCode;
+        errorCode = BITv06_initDStream(&bitD4, istart4, length4);
+        if (HUFv06_isError(errorCode)) return errorCode;
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+        for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUFv06_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUFv06_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUFv06_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUFv06_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUFv06_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUFv06_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv06_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv06_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv06_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
+    if (HUFv06_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize;
+    cSrcSize -= hSize;
+
+    return HUFv06_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
+}
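+
+/* Layout of the 4-streams input consumed by HUFv06_decompress4X4_usingDTable()
+   above (illustrative diagram, derived from its init code) :
+
+   | len1 LE16 | len2 LE16 | len3 LE16 | stream1 | stream2 | stream3 | stream4 |
+
+   The 6-byte jump table stores three little-endian 16-bit stream sizes; the
+   fourth size is inferred as cSrcSize - (len1+len2+len3+6). Each stream
+   regenerates up to segmentSize = (dstSize+3)/4 bytes of output. */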
+
+
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[3] = { HUFv06_decompress4X2, HUFv06_decompress4X4, NULL };
+    U32 Dtime[3];   /* decompression time estimation */
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    /* decoder timing evaluation */
+    {   U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+        U32 const D256 = (U32)(dstSize >> 8);
+        U32 n; for (n=0; n<3; n++)
+            Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
+    }
+
+    Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, to limit cache eviction */
+
+    {   U32 algoNb = 0;
+        if (Dtime[1] < Dtime[0]) algoNb = 1;
+        // if (Dtime[2] < Dtime[algoNb]) algoNb = 2;   /* current speed of HUFv06_decompress4X6 is not good */
+        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+    }
+
+    //return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
+    //return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
+    //return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize);   /* multi-streams quad-symbols decoding */
+}
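+
+/* Worked example of the selector above (illustrative numbers only) :
+   with cSrcSize = 32768 and dstSize = 65536, Q = 32768*16/65536 = 8
+   and D256 = 65536>>8 = 256. From algoTime[8] :
+     Dtime[0] =  926 + 128*256 = 33694   (single-symbol decoder, 4X2)
+     Dtime[1] = 1613 +  75*256 = 20813   (double-symbols decoder, 4X4)
+   After the cache-footprint penalty, Dtime[1] becomes 20813 + (20813>>4) = 22113,
+   still below Dtime[0], so HUFv06_decompress4X4() is selected. */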
+/*
+    Common functions of Zstd compression library
+    Copyright (C) 2015-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net/
+*/
+
+
+/*-****************************************
+*  Version
+******************************************/
+
+/*-****************************************
+*  ZSTD Error Management
+******************************************/
+/*! ZSTDv06_isError() :
+*   tells if a return value is an error code */
+unsigned ZSTDv06_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTDv06_getErrorName() :
+*   provides error code string from function result (useful for debugging) */
+const char* ZSTDv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+*  ZBUFF Error Management
+****************************************************************/
+unsigned ZBUFFv06_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+/*
+    zstd - standard compression library
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net
+*/
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTDv06_decompress() will allocate memory,
+ * in memory stack (0), or in memory heap (1, requires malloc())
+ */
+#ifndef ZSTDv06_HEAPMODE
+#  define ZSTDv06_HEAPMODE 1
+#endif
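+
+/* Usage note (illustrative) : building with -DZSTDv06_HEAPMODE=0 makes
+ * ZSTDv06_decompress() below place its DCtx on the stack instead of
+ * calling malloc(); note the DCtx embeds a full block-sized literal
+ * buffer, so the stack frame becomes correspondingly large. */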
+
+
+
+/*-*******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#endif
+
+
+/*-*************************************
+*  Macros
+***************************************/
+#define ZSTDv06_isError ERR_isError   /* for inlining */
+#define FSEv06_isError  ERR_isError
+#define HUFv06_isError  ERR_isError
+
+
+/*_*******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTDv06_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+*   Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTDv06_dStage;
+
+struct ZSTDv06_DCtx_s
+{
+    FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)];
+    FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)];
+    FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)];
+    unsigned   hufTableX4[HUFv06_DTABLE_SIZE(HufLog)];
+    const void* previousDstEnd;
+    const void* base;
+    const void* vBase;
+    const void* dictEnd;
+    size_t expected;
+    size_t headerSize;
+    ZSTDv06_frameParams fParams;
+    blockType_t bType;   /* used in ZSTDv06_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+    ZSTDv06_dStage stage;
+    U32 flagRepeatTable;
+    const BYTE* litPtr;
+    size_t litSize;
+    BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
+    BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
+};  /* typedef'd to ZSTDv06_DCtx within "zstd_static.h" */
+
+size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }   /* non published interface */
+
+size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx)
+{
+    dctx->expected = ZSTDv06_frameHeaderSize_min;
+    dctx->stage = ZSTDds_getFrameHeaderSize;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    dctx->vBase = NULL;
+    dctx->dictEnd = NULL;
+    dctx->hufTableX4[0] = HufLog;
+    dctx->flagRepeatTable = 0;
+    return 0;
+}
+
+ZSTDv06_DCtx* ZSTDv06_createDCtx(void)
+{
+    ZSTDv06_DCtx* dctx = (ZSTDv06_DCtx*)malloc(sizeof(ZSTDv06_DCtx));
+    if (dctx==NULL) return NULL;
+    ZSTDv06_decompressBegin(dctx);
+    return dctx;
+}
+
+size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx)
+{
+    free(dctx);
+    return 0;   /* reserved as a potential error code in the future */
+}
+
+void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dstDCtx, const ZSTDv06_DCtx* srcDCtx)
+{
+    memcpy(dstDCtx, srcDCtx,
+           sizeof(ZSTDv06_DCtx) - (ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH + ZSTDv06_frameHeaderSize_max));  /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+*   Decompression section
+***************************************************************/
+
+/* Frame format description
+   Frame Header -  [ Block Header - Block ] - Frame End
+   1) Frame Header
+      - 4 bytes - Magic Number : ZSTDv06_MAGICNUMBER (defined within zstd_static.h)
+      - 1 byte  - Frame Descriptor
+   2) Block Header
+      - 3 bytes, starting with a 2-bits descriptor
+                 Uncompressed, Compressed, Frame End, unused
+   3) Block
+      See Block Format Description
+   4) Frame End
+      - 3 bytes, compatible with Block Header
+*/
+
+
+/* Frame descriptor
+
+   1 byte, using :
+   bit 0-3 : windowLog - ZSTDv06_WINDOWLOG_ABSOLUTEMIN   (see zstd_internal.h)
+   bit 4   : minmatch 4(0) or 3(1)
+   bit 5   : reserved (must be zero)
+   bit 6-7 : Frame content size : unknown, 1 byte, 2 bytes, 8 bytes
+
+   Optional : content size (0, 1, 2 or 8 bytes)
+   0 : unknown
+   1 : 0-255 bytes
+   2 : 256 - 65791 (stored as value-256 over 2 bytes)
+   8 : up to 16 exabytes
+*/
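+
+/* Worked example (illustrative) : a frame descriptor byte of 0x4A decodes as
+     bits 0-3 = 0xA -> windowLog = 10 + ZSTDv06_WINDOWLOG_ABSOLUTEMIN
+     bit  4   = 0   -> minmatch 4
+     bit  5   = 0   -> reserved bit correctly zero
+     bits 6-7 = 01  -> frame content size stored on 1 byte (0-255)
+*/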
+
+
+/* Compressed Block, format description
+
+   Block = Literal Section - Sequences Section
+   Prerequisite : size of (compressed) block, maximum size of regenerated data
+
+   1) Literal Section
+
+   1.1) Header : 1-5 bytes
+        flags: 2 bits
+            00 compressed by Huff0
+            01 unused
+            10 is Raw (uncompressed)
+            11 is Rle
+            Note : using 01 => Huff0 with precomputed table ?
+            Note : delta map ? => compressed ?
+
+   1.1.1) Huff0-compressed literal block : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+            srcSize < 16KB => 4 bytes (2-2-14-14)
+            else           => 5 bytes (2-2-18-18)
+            big endian convention
+
+   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
+        size :  5 bits: (IS_RAW<<6) + (0<<4) + size
+               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
+                        size&255
+               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
+                        size>>8&255
+                        size&255
+
+   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
+        size :  5 bits: (IS_RLE<<6) + (0<<4) + size
+               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
+                        size&255
+               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
+                        size>>8&255
+                        size&255
+
+   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+            srcSize < 16KB => 4 bytes (2-2-14-14)
+            else           => 5 bytes (2-2-18-18)
+            big endian convention
+
+        1- CTable available (stored into workspace ?)
+        2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
+
+
+   1.2) Literal block content
+
+   1.2.1) Huff0 block, using sizes from header
+        See Huff0 format
+
+   1.2.2) Huff0 block, using prepared table
+
+   1.2.3) Raw content
+
+   1.2.4) single byte
+
+
+   2) Sequences section
+      TO DO
+*/
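+
+/* Worked example of a 3-byte Huff0 literal header (illustrative bytes,
+   matching the decoding in ZSTDv06_decodeLiteralsBlock() below) :
+   istart = { 0x13, 0x48, 0x21 }
+     istart[0]>>6       = 0  -> Huff0-compressed literals (IS_HUF)
+     (istart[0]>>4) & 3 = 1  -> 3-byte header; bit 4 set -> single stream
+     litSize  = ((0x13 & 15) << 6) + (0x48 >> 2) = 192 + 18 = 210
+     litCSize = ((0x48 &  3) << 8) +  0x21       =   0 + 33 =  33
+   i.e. 33 compressed bytes regenerate 210 literals. */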
+
+/** ZSTDv06_frameHeaderSize() :
+*   srcSize must be >= ZSTDv06_frameHeaderSize_min.
+*   @return : size of the Frame Header */
+static size_t ZSTDv06_frameHeaderSize(const void* src, size_t srcSize)
+{
+    if (srcSize < ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);
+    { U32 const fcsId = (((const BYTE*)src)[4]) >> 6;
+      return ZSTDv06_frameHeaderSize_min + ZSTDv06_fcs_fieldSize[fcsId]; }
+}
+
+
+/** ZSTDv06_getFrameParams() :
+*   decode Frame Header, or provide the expected `srcSize`.
+*   @return : 0 if `fparamsPtr` is correctly filled,
+*            >0 if `srcSize` is too small; the result is then the expected `srcSize`,
+*             or an error code, which can be tested using ZSTDv06_isError() */
+size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+
+    if (srcSize < ZSTDv06_frameHeaderSize_min) return ZSTDv06_frameHeaderSize_min;
+    if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
+
+    /* ensure there is enough `srcSize` to fully read/decode frame header */
+    { size_t const fhsize = ZSTDv06_frameHeaderSize(src, srcSize);
+      if (srcSize < fhsize) return fhsize; }
+
+    memset(fparamsPtr, 0, sizeof(*fparamsPtr));
+    {   BYTE const frameDesc = ip[4];
+        fparamsPtr->windowLog = (frameDesc & 0xF) + ZSTDv06_WINDOWLOG_ABSOLUTEMIN;
+        if ((frameDesc & 0x20) != 0) return ERROR(frameParameter_unsupported);   /* reserved 1 bit */
+        switch(frameDesc >> 6)  /* fcsId */
+        {
+            default:   /* impossible */
+            case 0 : fparamsPtr->frameContentSize = 0; break;
+            case 1 : fparamsPtr->frameContentSize = ip[5]; break;
+            case 2 : fparamsPtr->frameContentSize = MEM_readLE16(ip+5)+256; break;
+            case 3 : fparamsPtr->frameContentSize = MEM_readLE64(ip+5); break;
+    }   }
+    return 0;
+}
+
+
+/** ZSTDv06_decodeFrameHeader() :
+*   `srcSize` must be the size provided by ZSTDv06_frameHeaderSize().
+*   @return : 0 if success, or an error code, which can be tested using ZSTDv06_isError() */
+static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)
+{
+    size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);
+    if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
+    return result;
+}
+
+
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+/*! ZSTDv06_getcBlockSize() :
+*   Provides the size of the compressed block from the block header `src` */
+size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    U32 cSize;
+
+    if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+
+    bpPtr->blockType = (blockType_t)((*in) >> 6);
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
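+
+/* Worked example (illustrative bytes) : a block header of { 0x12, 0x34, 0x56 }
+   yields blockType = 0x12>>6 = 0 and
+   cSize = 0x56 + (0x34<<8) + ((0x12 & 7)<<16) = 0x23456 = 144470 bytes. */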
+
+
+static size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+/*! ZSTDv06_decodeLiteralsBlock() :
+    @return : number of bytes read from src (< srcSize) */
+size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    const BYTE* const istart = (const BYTE*) src;
+
+    /* any compressed block with a literals segment must be at least this size */
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    switch(istart[0]>> 6)
+    {
+    case IS_HUF:
+        {   size_t litSize, litCSize, singleStream=0;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
+                /* 2 - 2 - 10 - 10 */
+                lhSize=3;
+                singleStream = istart[0] & 16;
+                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+                litCSize = ((istart[1] &  3) << 8) + istart[2];
+                break;
+            case 2:
+                /* 2 - 2 - 14 - 14 */
+                lhSize=4;
+                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
+                litCSize = ((istart[2] & 63) <<  8) + istart[3];
+                break;
+            case 3:
+                /* 2 - 2 - 18 - 18 */
+                lhSize=5;
+                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
+                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
+                break;
+            }
+            if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+            if (HUFv06_isError(singleStream ?
+                            HUFv06_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+                            HUFv06_decompress   (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
+                return ERROR(corruption_detected);
+
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+            return litCSize + lhSize;
+        }
+    case IS_PCH:
+        {   size_t litSize, litCSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */
+                return ERROR(corruption_detected);
+            if (!dctx->flagRepeatTable)
+                return ERROR(dictionary_corrupted);
+
+            /* 2 - 2 - 10 - 10 */
+            lhSize=3;
+            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+            litCSize = ((istart[1] &  3) << 8) + istart[2];
+            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+            {   size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
+                if (HUFv06_isError(errorCode)) return ERROR(corruption_detected);
+            }
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+            return litCSize + lhSize;
+        }
+    case IS_RAW:
+        {   size_t litSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
+                lhSize=1;
+                litSize = istart[0] & 31;
+                break;
+            case 2:
+                litSize = ((istart[0] & 15) << 8) + istart[1];
+                break;
+            case 3:
+                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+                break;
+            }
+
+            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
+                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+                memcpy(dctx->litBuffer, istart+lhSize, litSize);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                return lhSize+litSize;
+            }
+            /* direct reference into compressed stream */
+            dctx->litPtr = istart+lhSize;
+            dctx->litSize = litSize;
+            return lhSize+litSize;
+        }
+    case IS_RLE:
+        {   size_t litSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize is in [0..3] */
+                lhSize = 1;
+                litSize = istart[0] & 31;
+                break;
+            case 2:
+                litSize = ((istart[0] & 15) << 8) + istart[1];
+                break;
+            case 3:
+                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+                break;
+            }
+            if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
+            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            return lhSize+1;
+        }
+    default:
+        return ERROR(corruption_detected);   /* impossible */
+    }
+}
+
+
+/*! ZSTDv06_buildSeqTable() :
+    @return : number of bytes read from src,
+              or an error code if it fails, testable with ZSTDv06_isError()
+*/
+size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,
+                                 const void* src, size_t srcSize,
+                                 const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
+{
+    switch(type)
+    {
+    case FSEv06_ENCODING_RLE :
+        if (!srcSize) return ERROR(srcSize_wrong);
+        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+        FSEv06_buildDTable_rle(DTable, *(const BYTE*)src);   /* if *src > max, data is corrupted */
+        return 1;
+    case FSEv06_ENCODING_RAW :
+        FSEv06_buildDTable(DTable, defaultNorm, max, defaultLog);
+        return 0;
+    case FSEv06_ENCODING_STATIC:
+        if (!flagRepeatTable) return ERROR(corruption_detected);
+        return 0;
+    default :   /* impossible */
+    case FSEv06_ENCODING_DYNAMIC :
+        {   U32 tableLog;
+            S16 norm[MaxSeq+1];
+            size_t const headerSize = FSEv06_readNCount(norm, &max, &tableLog, src, srcSize);
+            if (FSEv06_isError(headerSize)) return ERROR(corruption_detected);
+            if (tableLog > maxLog) return ERROR(corruption_detected);
+            FSEv06_buildDTable(DTable, norm, max, tableLog);
+            return headerSize;
+    }   }
+}
+
+
+size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
+                             FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable,
+                             const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip = istart;
+
+    /* check */
+    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    {   int nbSeq = *ip++;
+        if (!nbSeq) { *nbSeqPtr=0; return 1; }
+        if (nbSeq > 0x7F) {
+            if (nbSeq == 0xFF) {
+                if (ip+2 > iend) return ERROR(srcSize_wrong);
+                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+            } else {
+                if (ip >= iend) return ERROR(srcSize_wrong);
+                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+            }
+        }
+        *nbSeqPtr = nbSeq;
+    }
+
+    /* FSE table descriptors */
+    {   U32 const LLtype  = *ip >> 6;
+        U32 const Offtype = (*ip >> 4) & 3;
+        U32 const MLtype  = (*ip >> 2) & 3;
+        ip++;
+
+        /* check */
+        if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+        /* Build DTables */
+        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
+            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
+            ip += bhSize;
+        }
+        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableOffb, Offtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
+            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
+            ip += bhSize;
+        }
+        {   size_t const bhSize = ZSTDv06_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
+            if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
+            ip += bhSize;
+    }   }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t matchLength;
+    size_t offset;
+} seq_t;
+
+typedef struct {
+    BITv06_DStream_t DStream;
+    FSEv06_DState_t stateLL;
+    FSEv06_DState_t stateOffb;
+    FSEv06_DState_t stateML;
+    size_t prevOffset[ZSTDv06_REP_INIT];
+} seqState_t;
+
+
+
+static void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState)
+{
+    /* Literal length */
+    U32 const llCode = FSEv06_peekSymbol(&(seqState->stateLL));
+    U32 const mlCode = FSEv06_peekSymbol(&(seqState->stateML));
+    U32 const ofCode = FSEv06_peekSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */
+
+    U32 const llBits = LL_bits[llCode];
+    U32 const mlBits = ML_bits[mlCode];
+    U32 const ofBits = ofCode;
+    U32 const totalBits = llBits+mlBits+ofBits;
+
+    static const U32 LL_base[MaxLL+1] = {
+                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
+                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+                            0x2000, 0x4000, 0x8000, 0x10000 };
+
+    static const U32 ML_base[MaxML+1] = {
+                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10,   11,    12,    13,    14,    15,
+                            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,   27,    28,    29,    30,    31,
+                            32, 34, 36, 38, 40, 44, 48, 56, 64, 80, 96, 0x80, 0x100, 0x200, 0x400, 0x800,
+                            0x1000, 0x2000, 0x4000, 0x8000, 0x10000 };
+
+    static const U32 OF_base[MaxOff+1] = {
+                 0,        1,       3,       7,     0xF,     0x1F,     0x3F,     0x7F,
+                 0xFF,   0x1FF,   0x3FF,   0x7FF,   0xFFF,   0x1FFF,   0x3FFF,   0x7FFF,
+                 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
+                 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, /*fake*/ 1, 1 };
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofCode)
+            offset = 0;
+        else {
+            offset = OF_base[ofCode] + BITv06_readBits(&(seqState->DStream), ofBits);   /* <=  26 bits */
+            if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));
+        }
+
+        if (offset < ZSTDv06_REP_NUM) {
+            if (llCode == 0 && offset <= 1) offset = 1-offset;
+
+            if (offset != 0) {
+                size_t temp = seqState->prevOffset[offset];
+                if (offset != 1) {
+                    seqState->prevOffset[2] = seqState->prevOffset[1];
+                }
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+
+            } else {
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            offset -= ZSTDv06_REP_MOVE;
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq->offset = offset;
+    }
+
+    seq->matchLength = ML_base[mlCode] + MINMATCH + ((mlCode>31) ? BITv06_readBits(&(seqState->DStream), mlBits) : 0);   /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits>24)) BITv06_reloadDStream(&(seqState->DStream));
+
+    seq->litLength = LL_base[llCode] + ((llCode>15) ? BITv06_readBits(&(seqState->DStream), llBits) : 0);   /* <=  16 bits */
+    if (MEM_32bits() ||
+       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv06_reloadDStream(&(seqState->DStream));
+
+    /* ANS state update */
+    FSEv06_updateState(&(seqState->stateLL), &(seqState->DStream));   /* <=  9 bits */
+    FSEv06_updateState(&(seqState->stateML), &(seqState->DStream));   /* <=  9 bits */
+    if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));     /* <= 18 bits */
+    FSEv06_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <=  8 bits */
+}
+
+
+size_t ZSTDv06_execSequence(BYTE* op,
+                                BYTE* const oend, seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_8 = oend-8;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
+    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+
+    /* copy Literals */
+    ZSTDv06_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_8 || sequence.matchLength < MINMATCH) {
+              while (op < oMatchEnd) *op++ = *match++;
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_8 */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
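+        /* Overlap trick, illustrated for offset==1 : the four byte-wise copies
+         * replicate the single source byte into op[0..3]; dec32table[1]==1 then
+         * advances match onto freshly written output, making the copy4 below
+         * safe, and dec64table[1]==8 rewinds match so the subsequent wildcopy
+         * runs with an effective offset of at least 8. */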
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTDv06_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTDv06_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_8) {
+            ZSTDv06_wildcopy(op, match, oend_8 - op);
+            match += oend_8 - op;
+            op = oend_8;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+
+static size_t ZSTDv06_decompressSequences(
+                               ZSTDv06_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    FSEv06_DTable* DTableLL = dctx->LLTable;
+    FSEv06_DTable* DTableML = dctx->MLTable;
+    FSEv06_DTable* DTableOffb = dctx->OffTable;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+    int nbSeq;
+
+    /* Build Decoding Tables */
+    {   size_t const seqHSize = ZSTDv06_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->flagRepeatTable, ip, seqSize);
+        if (ZSTDv06_isError(seqHSize)) return seqHSize;
+        ip += seqHSize;
+        dctx->flagRepeatTable = 0;
+    }
+
+    /* Regen sequences */
+    if (nbSeq) {
+        seq_t sequence;
+        seqState_t seqState;
+
+        memset(&sequence, 0, sizeof(sequence));
+        sequence.offset = REPCODE_STARTVALUE;
+        { U32 i; for (i=0; i<ZSTDv06_REP_INIT; i++) seqState.prevOffset[i] = REPCODE_STARTVALUE; }
+        { size_t const errorCode = BITv06_initDStream(&(seqState.DStream), ip, iend-ip);
+          if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
+        FSEv06_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSEv06_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSEv06_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (BITv06_reloadDStream(&(seqState.DStream)) <= BITv06_DStream_completed) && nbSeq ; ) {
+            nbSeq--;
+            ZSTDv06_decodeSequence(&sequence, &seqState);
+
+#if 0  /* debug */
+            static BYTE* start = NULL;
+            if (start==NULL) start = op;
+            size_t pos = (size_t)(op-start);
+            if ((pos >= 5810037) && (pos < 5810400))
+                printf("Dpos %6u :%5u literals & match %3u bytes at distance %6u \n",
+                       pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);
+#endif
+
+            {   size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+                if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;
+                op += oneSeqSize;
+        }   }
+
+        /* check if reached exact end */
+        if (nbSeq) return ERROR(corruption_detected);
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
+        if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+
+static void ZSTDv06_checkContinuity(ZSTDv06_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd) {   /* not contiguous */
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+        dctx->base = dst;
+        dctx->previousDstEnd = dst;
+    }
+}
+
+
+static size_t ZSTDv06_decompressBlock_internal(ZSTDv06_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{   /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+
+    if (srcSize >= ZSTDv06_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
+
+    /* Decode literals sub-block */
+    {   size_t const litCSize = ZSTDv06_decodeLiteralsBlock(dctx, src, srcSize);
+        if (ZSTDv06_isError(litCSize)) return litCSize;
+        ip += litCSize;
+        srcSize -= litCSize;
+    }
+    return ZSTDv06_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+
+size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    ZSTDv06_checkContinuity(dctx, dst);
+    return ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+/*! ZSTDv06_decompressFrame() :
+*   `dctx` must be properly initialized */
+static size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* const iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* op = ostart;
+    BYTE* const oend = ostart + dstCapacity;
+    size_t remainingSize = srcSize;
+    blockProperties_t blockProperties = { bt_compressed, 0 };
+
+    /* check */
+    if (srcSize < ZSTDv06_frameHeaderSize_min+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+
+    /* Frame Header */
+    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
+        if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
+        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+        if (ZSTDv06_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1) {
+        size_t decodedSize=0;
+        size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTDv06_blockHeaderSize;
+        remainingSize -= ZSTDv06_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTDv06_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTDv06_copyRawBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            return ERROR(GENERIC);   /* not yet supported */
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            break;
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        if (ZSTDv06_isError(decodedSize)) return decodedSize;
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return op-ostart;
+}
+
+
+size_t ZSTDv06_decompress_usingPreparedDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* refDCtx,
+                                         void* dst, size_t dstCapacity,
+                                   const void* src, size_t srcSize)
+{
+    ZSTDv06_copyDCtx(dctx, refDCtx);
+    ZSTDv06_checkContinuity(dctx, dst);
+    return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize,
+                                 const void* dict, size_t dictSize)
+{
+    ZSTDv06_decompressBegin_usingDict(dctx, dict, dictSize);
+    ZSTDv06_checkContinuity(dctx, dst);
+    return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    return ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTDv06_HEAPMODE) && (ZSTDv06_HEAPMODE==1)
+    size_t regenSize;
+    ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx();
+    if (dctx==NULL) return ERROR(memory_allocation);
+    regenSize = ZSTDv06_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+    ZSTDv06_freeDCtx(dctx);
+    return regenSize;
+#else   /* stack mode */
+    ZSTDv06_DCtx dctx;
+    return ZSTDv06_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
+
+size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+    blockProperties_t blockProperties = { bt_compressed, 0 };
+
+    /* Frame Header */
+    {   size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
+        if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
+        if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
+        if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1) {
+        size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTDv06_blockHeaderSize;
+        remainingSize -= ZSTDv06_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        if (cBlockSize == 0) break;   /* bt_end */
+
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+/*_******************************
+*  Streaming Decompression API
+********************************/
+size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx)
+{
+    return dctx->expected;
+}
+
+size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+    if (dstCapacity) ZSTDv06_checkContinuity(dctx, dst);
+
+    /* Decompress : frame header; part 1 */
+    switch (dctx->stage)
+    {
+    case ZSTDds_getFrameHeaderSize :
+        if (srcSize != ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
+        dctx->headerSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
+        if (ZSTDv06_isError(dctx->headerSize)) return dctx->headerSize;
+        memcpy(dctx->headerBuffer, src, ZSTDv06_frameHeaderSize_min);
+        if (dctx->headerSize > ZSTDv06_frameHeaderSize_min) {
+            dctx->expected = dctx->headerSize - ZSTDv06_frameHeaderSize_min;
+            dctx->stage = ZSTDds_decodeFrameHeader;
+            return 0;
+        }
+        dctx->expected = 0;   /* not necessary to copy more */
+    /* fall-through */
+    case ZSTDds_decodeFrameHeader:
+        {   size_t result;
+            memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);
+            result = ZSTDv06_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
+            if (ZSTDv06_isError(result)) return result;
+            dctx->expected = ZSTDv06_blockHeaderSize;
+            dctx->stage = ZSTDds_decodeBlockHeader;
+            return 0;
+        }
+    case ZSTDds_decodeBlockHeader:
+        {   blockProperties_t bp;
+            size_t const cBlockSize = ZSTDv06_getcBlockSize(src, ZSTDv06_blockHeaderSize, &bp);
+            if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
+            if (bp.blockType == bt_end) {
+                dctx->expected = 0;
+                dctx->stage = ZSTDds_getFrameHeaderSize;
+            } else {
+                dctx->expected = cBlockSize;
+                dctx->bType = bp.blockType;
+                dctx->stage = ZSTDds_decompressBlock;
+            }
+            return 0;
+        }
+    case ZSTDds_decompressBlock:
+        {   size_t rSize;
+            switch(dctx->bType)
+            {
+            case bt_compressed:
+                rSize = ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+                break;
+            case bt_raw :
+                rSize = ZSTDv06_copyRawBlock(dst, dstCapacity, src, srcSize);
+                break;
+            case bt_rle :
+                return ERROR(GENERIC);   /* not yet handled */
+                break;
+            case bt_end :   /* should never happen (filtered at phase 1) */
+                rSize = 0;
+                break;
+            default:
+                return ERROR(GENERIC);   /* impossible */
+            }
+            dctx->stage = ZSTDds_decodeBlockHeader;
+            dctx->expected = ZSTDv06_blockHeaderSize;
+            dctx->previousDstEnd = (char*)dst + rSize;
+            return rSize;
+        }
+    default:
+        return ERROR(GENERIC);   /* impossible */
+    }
+}
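+
+/* A minimal, illustrative driver for the streaming entry points above : feed
+   exactly ZSTDv06_nextSrcSizeToDecompress() bytes per call until it returns 0.
+   Disabled sketch; readExact() is a hypothetical input callback, and dctx is
+   assumed already initialized with ZSTDv06_decompressBegin(). */
+#if 0
+static size_t example_decompressStream(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, char* srcBuff)
+{
+    size_t total = 0;
+    size_t toRead = ZSTDv06_nextSrcSizeToDecompress(dctx);
+    while (toRead) {
+        readExact(srcBuff, toRead);   /* hypothetical : must deliver exactly toRead bytes */
+        {   size_t const produced = ZSTDv06_decompressContinue(dctx,
+                                        (char*)dst + total, dstCapacity - total, srcBuff, toRead);
+            if (ZSTDv06_isError(produced)) return produced;
+            total += produced;
+        }
+        toRead = ZSTDv06_nextSrcSizeToDecompress(dctx);   /* 0 once the frame is done */
+    }
+    return total;
+}
+#endif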
+
+
+static void ZSTDv06_refDictContent(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    dctx->dictEnd = dctx->previousDstEnd;
+    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+    dctx->base = dict;
+    dctx->previousDstEnd = (const char*)dict + dictSize;
+}
+
+static size_t ZSTDv06_loadEntropy(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, litlengthHeaderSize;
+
+    hSize = HUFv06_readDTableX4(dctx->hufTableX4, dict, dictSize);
+    if (HUFv06_isError(hSize)) return ERROR(dictionary_corrupted);
+    dict = (const char*)dict + hSize;
+    dictSize -= hSize;
+
+    {   short offcodeNCount[MaxOff+1];
+        U32 offcodeMaxValue=MaxOff, offcodeLog;
+        offcodeHeaderSize = FSEv06_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
+        if (FSEv06_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        { size_t const errorCode = FSEv06_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
+          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
+        dict = (const char*)dict + offcodeHeaderSize;
+        dictSize -= offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        matchlengthHeaderSize = FSEv06_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
+        if (FSEv06_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        { size_t const errorCode = FSEv06_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
+          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
+        dict = (const char*)dict + matchlengthHeaderSize;
+        dictSize -= matchlengthHeaderSize;
+    }
+
+    {   short litlengthNCount[MaxLL+1];
+        unsigned litlengthMaxValue = MaxLL, litlengthLog;
+        litlengthHeaderSize = FSEv06_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
+        if (FSEv06_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        { size_t const errorCode = FSEv06_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
+          if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
+    }
+
+    dctx->flagRepeatTable = 1;
+    return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
+}
+
+static size_t ZSTDv06_decompress_insertDictionary(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    size_t eSize;
+    U32 const magic = MEM_readLE32(dict);
+    if (magic != ZSTDv06_DICT_MAGIC) {
+        /* pure content mode */
+        ZSTDv06_refDictContent(dctx, dict, dictSize);
+        return 0;
+    }
+    /* load entropy tables */
+    dict = (const char*)dict + 4;
+    dictSize -= 4;
+    eSize = ZSTDv06_loadEntropy(dctx, dict, dictSize);
+    if (ZSTDv06_isError(eSize)) return ERROR(dictionary_corrupted);
+
+    /* reference dictionary content */
+    dict = (const char*)dict + eSize;
+    dictSize -= eSize;
+    ZSTDv06_refDictContent(dctx, dict, dictSize);
+
+    return 0;
+}
+
+
+size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    { size_t const errorCode = ZSTDv06_decompressBegin(dctx);
+      if (ZSTDv06_isError(errorCode)) return errorCode; }
+
+    if (dict && dictSize) {
+        size_t const errorCode = ZSTDv06_decompress_insertDictionary(dctx, dict, dictSize);
+        if (ZSTDv06_isError(errorCode)) return ERROR(dictionary_corrupted);
+    }
+
+    return 0;
+}
+
+/*
+    Buffered version of Zstd compression library
+    Copyright (C) 2015-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net/
+*/
+
+
+/*-***************************************************************************
+*  Streaming decompression howto
+*
+*  A ZBUFFv06_DCtx object is required to track streaming operations.
+*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
+*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
+*   or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
+*  Note that ZBUFFv06_DCtx objects can be re-initialized multiple times.
+*
+*  Use ZBUFFv06_decompressContinue() repeatedly to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
+*  @return : a hint for the preferred number of bytes to use as input for the next function call (it's only a hint, to help latency),
+*            or 0 when a frame is completely decoded,
+*            or an error code, which can be tested using ZBUFFv06_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
+*  output : ZBUFFv06_recommendedDOutSize == 128 KB; the block size is the internal unit, so it's always possible to write a full block once decoded.
+*  input  : ZBUFFv06_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
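+
+/* A minimal sketch (editor's addition, not part of the upstream source) of
+*  the loop described above, assuming hypothetical caller-supplied
+*  readInput()/writeOutput() helpers and eliding the re-presentation of
+*  unconsumed input :
+*
+*      ZBUFFv06_DCtx* const dctx = ZBUFFv06_createDCtx();
+*      char in[4096]; char out[128*1024];
+*      ZBUFFv06_decompressInit(dctx);
+*      for (;;) {
+*          size_t srcSize = readInput(in, sizeof(in));   // bytes actually read
+*          size_t dstSize = sizeof(out);
+*          size_t const hint = ZBUFFv06_decompressContinue(dctx, out, &dstSize, in, &srcSize);
+*          if (ZBUFFv06_isError(hint)) break;   // see ZBUFFv06_getErrorName(hint)
+*          writeOutput(out, dstSize);           // dstSize bytes were regenerated
+*          if (hint == 0) break;                // frame completely decoded
+*          // if srcSize < bytes read, present the remaining input again
+*      }
+*      ZBUFFv06_freeDCtx(dctx);
+*/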
+
+typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
+               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv06_dStage;
+
+/* *** Resource management *** */
+struct ZBUFFv06_DCtx_s {
+    ZSTDv06_DCtx* zd;
+    ZSTDv06_frameParams fParams;
+    ZBUFFv06_dStage stage;
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t blockSize;
+    BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
+    size_t lhSize;
+};   /* typedef'd to ZBUFFv06_DCtx within "zstd_buffered.h" */
+
+
+ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void)
+{
+    ZBUFFv06_DCtx* zbd = (ZBUFFv06_DCtx*)malloc(sizeof(ZBUFFv06_DCtx));
+    if (zbd==NULL) return NULL;
+    memset(zbd, 0, sizeof(*zbd));
+    zbd->zd = ZSTDv06_createDCtx();
+    zbd->stage = ZBUFFds_init;
+    return zbd;
+}
+
+size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* zbd)
+{
+    if (zbd==NULL) return 0;   /* support free on null */
+    ZSTDv06_freeDCtx(zbd->zd);
+    free(zbd->inBuff);
+    free(zbd->outBuff);
+    free(zbd);
+    return 0;
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* zbd, const void* dict, size_t dictSize)
+{
+    zbd->stage = ZBUFFds_loadHeader;
+    zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
+    return ZSTDv06_decompressBegin_usingDict(zbd->zd, dict, dictSize);
+}
+
+size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd)
+{
+    return ZBUFFv06_decompressInitDictionary(zbd, NULL, 0);
+}
+
+
+
+MEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t length = MIN(dstCapacity, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
+                                void* dst, size_t* dstCapacityPtr,
+                          const void* src, size_t* srcSizePtr)
+{
+    const char* const istart = (const char*)src;
+    const char* const iend = istart + *srcSizePtr;
+    const char* ip = istart;
+    char* const ostart = (char*)dst;
+    char* const oend = ostart + *dstCapacityPtr;
+    char* op = ostart;
+    U32 notDone = 1;
+
+    while (notDone) {
+        switch(zbd->stage)
+        {
+        case ZBUFFds_init :
+            return ERROR(init_missing);
+
+        case ZBUFFds_loadHeader :
+            {   size_t const hSize = ZSTDv06_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
+                if (hSize != 0) {
+                    size_t const toLoad = hSize - zbd->lhSize;   /* if hSize!=0, hSize > zbd->lhSize */
+                    if (ZSTDv06_isError(hSize)) return hSize;
+                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */
+                        memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
+                        zbd->lhSize += iend-ip; ip = iend; notDone = 0;
+                        *dstCapacityPtr = 0;
+                        return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize;   /* remaining header bytes + next block header */
+                    }
+                    memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
+                    break;
+            }   }
+
+            /* Consume header */
+            {   size_t const h1Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);  /* == ZSTDv06_frameHeaderSize_min */
+                size_t const h1Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
+                if (ZSTDv06_isError(h1Result)) return h1Result;
+                if (h1Size < zbd->lhSize) {   /* long header */
+                    size_t const h2Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+                    size_t const h2Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
+                    if (ZSTDv06_isError(h2Result)) return h2Result;
+            }   }
+
+            /* Frame header indicates required buffer sizes */
+            {   size_t const blockSize = MIN(1 << zbd->fParams.windowLog, ZSTDv06_BLOCKSIZE_MAX);
+                zbd->blockSize = blockSize;
+                if (zbd->inBuffSize < blockSize) {
+                    free(zbd->inBuff);
+                    zbd->inBuffSize = blockSize;
+                    zbd->inBuff = (char*)malloc(blockSize);
+                    if (zbd->inBuff == NULL) return ERROR(memory_allocation);
+                }
+                {   size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
+                    if (zbd->outBuffSize < neededOutSize) {
+                        free(zbd->outBuff);
+                        zbd->outBuffSize = neededOutSize;
+                        zbd->outBuff = (char*)malloc(neededOutSize);
+                        if (zbd->outBuff == NULL) return ERROR(memory_allocation);
+            }   }   }
+            zbd->stage = ZBUFFds_read;
+            /* fall-through */
+        case ZBUFFds_read:
+            {   size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+                if (neededInSize==0) {  /* end of frame */
+                    zbd->stage = ZBUFFds_init;
+                    notDone = 0;
+                    break;
+                }
+                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
+                    size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
+                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
+                        ip, neededInSize);
+                    if (ZSTDv06_isError(decodedSize)) return decodedSize;
+                    ip += neededInSize;
+                    if (!decodedSize) break;   /* this was just a header */
+                    zbd->outEnd = zbd->outStart +  decodedSize;
+                    zbd->stage = ZBUFFds_flush;
+                    break;
+                }
+                if (ip==iend) { notDone = 0; break; }   /* no more input */
+                zbd->stage = ZBUFFds_load;
+            }
+            /* fall-through */
+        case ZBUFFds_load:
+            {   size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+                size_t const toLoad = neededInSize - zbd->inPos;   /* should always be <= remaining space within inBuff */
+                size_t loadedSize;
+                if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected);   /* should never happen */
+                loadedSize = ZBUFFv06_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
+                ip += loadedSize;
+                zbd->inPos += loadedSize;
+                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
+
+                /* decode loaded input */
+                {   size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
+                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
+                        zbd->inBuff, neededInSize);
+                    if (ZSTDv06_isError(decodedSize)) return decodedSize;
+                    zbd->inPos = 0;   /* input is consumed */
+                    if (!decodedSize) { zbd->stage = ZBUFFds_read; break; }   /* this was just a header */
+                    zbd->outEnd = zbd->outStart +  decodedSize;
+                    zbd->stage = ZBUFFds_flush;
+                    /* no break : ZBUFFds_flush follows */
+                }
+            }
+            /* fall-through */
+        case ZBUFFds_flush:
+            {   size_t const toFlushSize = zbd->outEnd - zbd->outStart;
+                size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
+                op += flushedSize;
+                zbd->outStart += flushedSize;
+                if (flushedSize == toFlushSize) {
+                    zbd->stage = ZBUFFds_read;
+                    if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
+                        zbd->outStart = zbd->outEnd = 0;
+                    break;
+                }
+                /* cannot flush everything */
+                notDone = 0;
+                break;
+            }
+        default: return ERROR(GENERIC);   /* impossible */
+    }   }
+
+    /* result */
+    *srcSizePtr = ip-istart;
+    *dstCapacityPtr = op-ostart;
+    {   size_t nextSrcSizeHint = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
+        if (nextSrcSizeHint > ZSTDv06_blockHeaderSize) nextSrcSizeHint+= ZSTDv06_blockHeaderSize;   /* get following block header too */
+        nextSrcSizeHint -= zbd->inPos;   /* already loaded */
+        return nextSrcSizeHint;
+    }
+}
+
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+size_t ZBUFFv06_recommendedDInSize(void)  { return ZSTDv06_BLOCKSIZE_MAX + ZSTDv06_blockHeaderSize /* block header size */; }
+size_t ZBUFFv06_recommendedDOutSize(void) { return ZSTDv06_BLOCKSIZE_MAX; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v06.h b/vendor/github.com/DataDog/zstd/zstd_v06.h
new file mode 100644
index 0000000..fb4eb37
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v06.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDv06_H
+#define ZSTDv06_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*======  Dependency  ======*/
+#include <stddef.h>   /* size_t */
+
+
+/*======  Export for Windows  ======*/
+/*!
+*  ZSTDv06_DLL_EXPORT :
+*  Enable exporting of functions when building a Windows DLL
+*/
+#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
+#  define ZSTDLIBv06_API __declspec(dllexport)
+#else
+#  define ZSTDLIBv06_API
+#endif
+
+
+/* *************************************
+*  Simple functions
+***************************************/
+/*! ZSTDv06_decompress() :
+    `compressedSize` : is the _exact_ size of the compressed blob, otherwise decompression will fail.
+    `dstCapacity` must be large enough, equal or larger than originalSize.
+    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+              or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
+ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
+                                    const void* src, size_t compressedSize);
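+
+/* A one-shot usage sketch (editor's addition, not upstream) : per the note
+*  above, the caller is responsible for knowing the exact compressed size and
+*  a sufficient dst capacity; handleError() is a hypothetical handler :
+*
+*      size_t const r = ZSTDv06_decompress(dst, dstCapacity, src, exactCompressedSize);
+*      if (ZSTDv06_isError(r)) handleError(ZSTDv06_getErrorName(r));
+*      else useRegeneratedData(dst, r);   // r == number of bytes written into dst
+*/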
+
+/**
+ZSTDv06_findFrameCompressedSize() : get the compressed size of a ZSTD frame
+    compressedSize : the size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame,
+             or an errorCode if it fails (which can be tested using ZSTDv06_isError())
+*/
+size_t ZSTDv06_findFrameCompressedSize(const void* src, size_t compressedSize);
+
+/* *************************************
+*  Helper functions
+***************************************/
+ZSTDLIBv06_API size_t      ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */
+
+/* Error Management */
+ZSTDLIBv06_API unsigned    ZSTDv06_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
+ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code);     /*!< provides readable string for an error code */
+
+
+/* *************************************
+*  Explicit memory management
+***************************************/
+/** Decompression context */
+typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
+ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
+ZSTDLIBv06_API size_t     ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx);      /*!< @return : errorCode */
+
+/** ZSTDv06_decompressDCtx() :
+*   Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
+ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-***********************
+*  Dictionary API
+*************************/
+/*! ZSTDv06_decompress_usingDict() :
+*   Decompression using a pre-defined Dictionary content (see dictBuilder).
+*   Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
+*   Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */
+ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
+                                                   void* dst, size_t dstCapacity,
+                                             const void* src, size_t srcSize,
+                                             const void* dict,size_t dictSize);
+
+
+/*-************************
+*  Advanced Streaming API
+***************************/
+struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
+typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;
+
+ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIBv06_API void   ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);
+
+ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
+ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+
+/* *************************************
+*  ZBUFF API
+***************************************/
+
+typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
+ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
+ZSTDLIBv06_API size_t         ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);
+
+ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
+ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);
+
+ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
+                                                  void* dst, size_t* dstCapacityPtr,
+                                            const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+*  Streaming decompression howto
+*
+*  A ZBUFFv06_DCtx object is required to track streaming operations.
+*  Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
+*  Use ZBUFFv06_decompressInit() to start a new decompression operation,
+*   or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
+*  Note that ZBUFFv06_DCtx objects can be re-initialized multiple times.
+*
+*  Use ZBUFFv06_decompressContinue() repeatedly to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
+*  @return : a hint for the preferred number of bytes to use as input for the next function call (it's only a hint, to help latency),
+*            or 0 when a frame is completely decoded,
+*            or an error code, which can be tested using ZBUFFv06_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
+*  output : ZBUFFv06_recommendedDOutSize == 128 KB; the block size is the internal unit, so it's always possible to write a full block once decoded.
+*  input  : ZBUFFv06_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
+ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+*   These sizes are just hints, they tend to offer better latency */
+ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
+ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define ZSTDv06_MAGICNUMBER 0xFD2FB526   /* v0.6 */
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTDv06_H */
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.c b/vendor/github.com/DataDog/zstd/zstd_v07.c
new file mode 100644
index 0000000..70b170f
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v07.c
@@ -0,0 +1,4502 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/*- Dependencies -*/
+#include <stddef.h>     /* size_t, ptrdiff_t */
+#include <string.h>     /* memcpy */
+#include <stdlib.h>     /* malloc, free, qsort */
+
+#ifndef XXH_STATIC_LINKING_ONLY
+#  define XXH_STATIC_LINKING_ONLY    /* XXH64_state_t */
+#endif
+#include "xxhash.h"                  /* XXH64_* */
+#include "zstd_v07.h"
+
+#define FSEv07_STATIC_LINKING_ONLY   /* FSEv07_MIN_TABLELOG */
+#define HUFv07_STATIC_LINKING_ONLY   /* HUFv07_TABLELOG_ABSOLUTEMAX */
+#define ZSTDv07_STATIC_LINKING_ONLY
+
+#include "error_private.h"
+
+
+#ifdef ZSTDv07_STATIC_LINKING_ONLY
+
+/* ====================================================================================
+ * The definitions in this section are considered experimental.
+ * They should never be used with a dynamic library, as they may change in the future.
+ * They are provided for advanced usages.
+ * Use them only in association with static linking.
+ * ==================================================================================== */
+
+/*--- Constants ---*/
+#define ZSTDv07_MAGIC_SKIPPABLE_START  0x184D2A50U
+
+#define ZSTDv07_WINDOWLOG_MAX_32  25
+#define ZSTDv07_WINDOWLOG_MAX_64  27
+#define ZSTDv07_WINDOWLOG_MAX    ((U32)(MEM_32bits() ? ZSTDv07_WINDOWLOG_MAX_32 : ZSTDv07_WINDOWLOG_MAX_64))
+#define ZSTDv07_WINDOWLOG_MIN     18
+#define ZSTDv07_CHAINLOG_MAX     (ZSTDv07_WINDOWLOG_MAX+1)
+#define ZSTDv07_CHAINLOG_MIN       4
+#define ZSTDv07_HASHLOG_MAX       ZSTDv07_WINDOWLOG_MAX
+#define ZSTDv07_HASHLOG_MIN       12
+#define ZSTDv07_HASHLOG3_MAX      17
+#define ZSTDv07_SEARCHLOG_MAX    (ZSTDv07_WINDOWLOG_MAX-1)
+#define ZSTDv07_SEARCHLOG_MIN      1
+#define ZSTDv07_SEARCHLENGTH_MAX   7
+#define ZSTDv07_SEARCHLENGTH_MIN   3
+#define ZSTDv07_TARGETLENGTH_MIN   4
+#define ZSTDv07_TARGETLENGTH_MAX 999
+
+#define ZSTDv07_FRAMEHEADERSIZE_MAX 18    /* for static allocation */
+static const size_t ZSTDv07_frameHeaderSize_min = 5;
+static const size_t ZSTDv07_frameHeaderSize_max = ZSTDv07_FRAMEHEADERSIZE_MAX;
+static const size_t ZSTDv07_skippableHeaderSize = 8;  /* magic number + skippable frame length */
+
+
+/* custom memory allocation functions */
+typedef void* (*ZSTDv07_allocFunction) (void* opaque, size_t size);
+typedef void  (*ZSTDv07_freeFunction) (void* opaque, void* address);
+typedef struct { ZSTDv07_allocFunction customAlloc; ZSTDv07_freeFunction customFree; void* opaque; } ZSTDv07_customMem;
+
+
+/*--- Advanced Decompression functions ---*/
+
+/*! ZSTDv07_estimateDCtxSize() :
+ *  Gives the potential amount of memory allocated to create a ZSTDv07_DCtx */
+ZSTDLIBv07_API size_t ZSTDv07_estimateDCtxSize(void);
+
+/*! ZSTDv07_createDCtx_advanced() :
+ *  Create a ZSTD decompression context using external alloc and free functions */
+ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem);
+
+/*! ZSTDv07_sizeofDCtx() :
+ *  Gives the amount of memory used by a given ZSTDv07_DCtx */
+ZSTDLIBv07_API size_t ZSTDv07_sizeofDCtx(const ZSTDv07_DCtx* dctx);
+
+
+/* ******************************************************************
+*  Buffer-less streaming functions (synchronous mode)
+********************************************************************/
+
+ZSTDLIBv07_API size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx);
+ZSTDLIBv07_API size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize);
+ZSTDLIBv07_API void   ZSTDv07_copyDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* preparedDCtx);
+
+ZSTDLIBv07_API size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx);
+ZSTDLIBv07_API size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+/*
+  Buffer-less streaming decompression (synchronous mode)
+
+  A ZSTDv07_DCtx object is required to track streaming operations.
+  Use ZSTDv07_createDCtx() / ZSTDv07_freeDCtx() to manage it.
+  A ZSTDv07_DCtx object can be re-used multiple times.
+
+  First optional operation is to retrieve frame parameters, using ZSTDv07_getFrameParams(), which doesn't consume the input.
+  It can provide the minimum size of the rolling buffer required to properly decompress data (`windowSize`),
+  and optionally the final size of the uncompressed content.
+  (Note : content size is optional information that may not be present. 0 means : content size unknown)
+  Frame parameters are extracted from the beginning of the compressed frame.
+  The amount of data to read is variable, from ZSTDv07_frameHeaderSize_min to ZSTDv07_frameHeaderSize_max (so if `srcSize` >= ZSTDv07_frameHeaderSize_max, it will always work)
+  If `srcSize` is too small for the operation to succeed, the function will return the minimum size it requires to produce a result.
+  Result : 0 when successful, meaning the ZSTDv07_frameParams structure has been filled.
+          >0 : there is not enough data in `src`; the value is the expected size needed to successfully decode the header.
+           or an errorCode, which can be tested using ZSTDv07_isError()
+
+  Start decompression, with ZSTDv07_decompressBegin() or ZSTDv07_decompressBegin_usingDict().
+  Alternatively, you can copy a prepared context, using ZSTDv07_copyDCtx().
+
+  Then use ZSTDv07_nextSrcSizeToDecompress() and ZSTDv07_decompressContinue() alternatively.
+  ZSTDv07_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv07_decompressContinue().
+  ZSTDv07_decompressContinue() requires this exact amount of bytes, or it will fail.
+
+  @result of ZSTDv07_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
+  It can be zero, which is not an error; it just means ZSTDv07_decompressContinue() has decoded some header.
+
+  ZSTDv07_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
+  They should preferably be located contiguously, prior to the current block.
+  Alternatively, a ring buffer of sufficient size is also possible; the sufficient size is determined by the frame parameters.
+  ZSTDv07_decompressContinue() is very sensitive to contiguity;
+  if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
+    or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
+
+  A frame is fully decoded when ZSTDv07_nextSrcSizeToDecompress() returns zero.
+  Context can then be reset to start a new decompression.
+
+
+  == Special case : skippable frames ==
+
+  Skippable frames allow the integration of user-defined data into a flow of concatenated frames.
+  Skippable frames will be ignored (skipped) by a decompressor. The format of a skippable frame is as follows:
+  a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
+  b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+  c) Frame Content - any content (User Data) of length equal to Frame Size
+  For skippable frames ZSTDv07_decompressContinue() always returns 0.
+  For skippable frames ZSTDv07_getFrameParams() returns fparamsPtr->windowLog==0, which means that the frame is skippable.
+  It also returns Frame Size as fparamsPtr->frameContentSize.
+*/
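+
+/* A condensed sketch (editor's addition, not upstream) of the protocol above,
+*  assuming `input` holds one complete frame and `dst`/`dstEnd` delimit a
+*  contiguous buffer large enough for the regenerated content :
+*
+*      ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
+*      ZSTDv07_decompressBegin(dctx);
+*      {   const BYTE* ip = input;
+*          BYTE* op = dst;
+*          size_t neededSize;
+*          while ((neededSize = ZSTDv07_nextSrcSizeToDecompress(dctx)) != 0) {
+*              size_t const produced = ZSTDv07_decompressContinue(dctx, op, dstEnd-op, ip, neededSize);
+*              if (ZSTDv07_isError(produced)) break;   // decoding error
+*              ip += neededSize;   // exactly this many bytes must be provided
+*              op += produced;     // may be 0 when only a header was decoded
+*          }
+*      }
+*      ZSTDv07_freeDCtx(dctx);
+*/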
+
+
+/* **************************************
+*  Block functions
+****************************************/
+/*! Block functions produce and decode raw zstd blocks, without frame metadata.
+    Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
+    The user will have to keep track of the information required to regenerate the data, such as compressed and content sizes.
+
+    A few rules to respect :
+    - Compressing and decompressing require a context structure
+      + Use ZSTDv07_createCCtx() and ZSTDv07_createDCtx()
+    - It is necessary to init context before starting
+      + compression : ZSTDv07_compressBegin()
+      + decompression : ZSTDv07_decompressBegin()
+      + variants _usingDict() are also allowed
+      + copyCCtx() and copyDCtx() work too
+    - Block size is limited, it must be <= ZSTDv07_getBlockSizeMax()
+      + If you need to compress more, cut data into multiple blocks
+      + Consider using the regular ZSTDv07_compress() instead, as frame metadata costs become negligible when source size is large.
+    - When a block is considered not compressible enough, ZSTDv07_compressBlock() result will be zero.
+      In that case, nothing is written into `dst`.
+      + User must test for such an outcome and deal directly with the uncompressed data
+      + ZSTDv07_decompressBlock() doesn't accept uncompressed data as input !!!
+      + In case of multiple successive blocks, the decoder must be informed of any uncompressed block, so it can maintain a proper history.
+        Use ZSTDv07_insertBlock() in such a case.
+*/
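+
+/* A sketch (editor's addition, not upstream) of the rule above for handling a
+*  non-compressed block; `dctx` is assumed already initialized with
+*  ZSTDv07_decompressBegin(), and `isCompressed` plus the sizes come from the
+*  caller's own framing :
+*
+*      if (isCompressed) {
+*          size_t const r = ZSTDv07_decompressBlock(dctx, dst, dstCapacity, src, srcSize);
+*          if (ZSTDv07_isError(r)) handleError(r);    // hypothetical handler
+*      } else {
+*          memcpy(dst, src, srcSize);                 // block was stored raw
+*          ZSTDv07_insertBlock(dctx, dst, srcSize);   // keep decoder history consistent
+*      }
+*/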
+
+#define ZSTDv07_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)   /* define, for static allocation */
+ZSTDLIBv07_API size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize);  /**< insert block into `dctx` history. Useful for uncompressed blocks */
+
+
+#endif   /* ZSTDv07_STATIC_LINKING_ONLY */
+
+
+/* ******************************************************************
+   mem.h
+   low-level memory access routines
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+#ifndef MEM_H_MODULE
+#define MEM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*-****************************************
+*  Compiler specifics
+******************************************/
+#if defined(_MSC_VER)   /* Visual Studio */
+#   include <stdlib.h>  /* _byteswap_ulong */
+#   include <intrin.h>  /* _byteswap_* */
+#endif
+#if defined(__GNUC__)
+#  define MEM_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define MEM_STATIC static inline
+#elif defined(_MSC_VER)
+#  define MEM_STATIC static __inline
+#else
+#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-**************************************************************
+*  Basic Types
+*****************************************************************/
+#if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+  typedef  uint8_t BYTE;
+  typedef uint16_t U16;
+  typedef  int16_t S16;
+  typedef uint32_t U32;
+  typedef  int32_t S32;
+  typedef uint64_t U64;
+  typedef  int64_t S64;
+#else
+  typedef unsigned char       BYTE;
+  typedef unsigned short      U16;
+  typedef   signed short      S16;
+  typedef unsigned int        U32;
+  typedef   signed int        S32;
+  typedef unsigned long long  U64;
+  typedef   signed long long  S64;
+#endif
+
+
+/*-**************************************************************
+*  Memory I/O
+*****************************************************************/
+/* MEM_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on a compiler extension, but violates the C standard.
+ *            It can generate buggy code on targets that depend on alignment.
+ *            In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
+#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#    define MEM_FORCE_MEMORY_ACCESS 2
+#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#    define MEM_FORCE_MEMORY_ACCESS 1
+#  endif
+#endif
+
+MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
+MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
+
+MEM_STATIC unsigned MEM_isLittleEndian(void)
+{
+    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental  */
+    return one.c[0];
+}
+
+#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
+
+/* violates C standard, by lying on structure alignment.
+Only use if no other choice to achieve best performance on target platform */
+MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
+MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
+MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+
+#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
+
+MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+
+#else
+
+/* default method, safe and standard.
+   can sometimes prove slower */
+
+MEM_STATIC U16 MEM_read16(const void* memPtr)
+{
+    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U32 MEM_read32(const void* memPtr)
+{
+    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC U64 MEM_read64(const void* memPtr)
+{
+    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value)
+{
+    memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* MEM_FORCE_MEMORY_ACCESS */
+
+MEM_STATIC U32 MEM_swap32(U32 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_ulong(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+    return __builtin_bswap32(in);
+#else
+    return  ((in << 24) & 0xff000000 ) |
+            ((in <<  8) & 0x00ff0000 ) |
+            ((in >>  8) & 0x0000ff00 ) |
+            ((in >> 24) & 0x000000ff );
+#endif
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_uint64(in);
+#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+    return __builtin_bswap64(in);
+#else
+    return  ((in << 56) & 0xff00000000000000ULL) |
+            ((in << 40) & 0x00ff000000000000ULL) |
+            ((in << 24) & 0x0000ff0000000000ULL) |
+            ((in << 8)  & 0x000000ff00000000ULL) |
+            ((in >> 8)  & 0x00000000ff000000ULL) |
+            ((in >> 24) & 0x0000000000ff0000ULL) |
+            ((in >> 40) & 0x000000000000ff00ULL) |
+            ((in >> 56) & 0x00000000000000ffULL);
+#endif
+}
+
+
+/*=== Little endian r/w ===*/
+
+MEM_STATIC U16 MEM_readLE16(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read16(memPtr);
+    else {
+        const BYTE* p = (const BYTE*)memPtr;
+        return (U16)(p[0] + (p[1]<<8));
+    }
+}
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
+{
+    if (MEM_isLittleEndian()) {
+        MEM_write16(memPtr, val);
+    } else {
+        BYTE* p = (BYTE*)memPtr;
+        p[0] = (BYTE)val;
+        p[1] = (BYTE)(val>>8);
+    }
+}
+
+MEM_STATIC U32 MEM_readLE32(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read32(memPtr);
+    else
+        return MEM_swap32(MEM_read32(memPtr));
+}
+
+
+MEM_STATIC U64 MEM_readLE64(const void* memPtr)
+{
+    if (MEM_isLittleEndian())
+        return MEM_read64(memPtr);
+    else
+        return MEM_swap64(MEM_read64(memPtr));
+}
+
+MEM_STATIC size_t MEM_readLEST(const void* memPtr)
+{
+    if (MEM_32bits())
+        return (size_t)MEM_readLE32(memPtr);
+    else
+        return (size_t)MEM_readLE64(memPtr);
+}
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* MEM_H_MODULE */
+/* ******************************************************************
+   bitstream
+   Part of FSE library
+   header file (to include)
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef BITSTREAM_H_MODULE
+#define BITSTREAM_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*
+*  This API consists of small unitary functions, which must be inlined for best performance.
+*  Since link-time-optimization is not available for all compilers,
+*  these functions are defined into a .h to be included.
+*/
+
+
+/*=========================================
+*  Target specific
+=========================================*/
+#if defined(__BMI__) && defined(__GNUC__)
+#  include <immintrin.h>   /* support for bextr (experimental) */
+#endif
+
+/*-********************************************
+*  bitStream decoding API (read backward)
+**********************************************/
+typedef struct
+{
+    size_t   bitContainer;
+    unsigned bitsConsumed;
+    const char* ptr;
+    const char* start;
+} BITv07_DStream_t;
+
+typedef enum { BITv07_DStream_unfinished = 0,
+               BITv07_DStream_endOfBuffer = 1,
+               BITv07_DStream_completed = 2,
+               BITv07_DStream_overflow = 3 } BITv07_DStream_status;  /* result of BITv07_reloadDStream() */
+               /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
+
+MEM_STATIC size_t   BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
+MEM_STATIC size_t   BITv07_readBits(BITv07_DStream_t* bitD, unsigned nbBits);
+MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD);
+MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* bitD);
+
+
+
+/*-****************************************
+*  unsafe API
+******************************************/
+MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);
+/* faster, but works only if nbBits >= 1 */
+
+
+
+/*-**************************************************************
+*  Internal functions
+****************************************************************/
+MEM_STATIC unsigned BITv07_highbit32 (U32 val)
+{
+#   if defined(_MSC_VER)   /* Visual */
+    unsigned long r=0;
+    _BitScanReverse ( &r, val );
+    return (unsigned) r;
+#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
+    return 31 - __builtin_clz (val);
+#   else   /* Software version */
+    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
+    U32 v = val;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
+#   endif
+}
+
+
+
+/*-********************************************************
+* bitStream decoding
+**********************************************************/
+/*! BITv07_initDStream() :
+*   Initialize a BITv07_DStream_t.
+*   `bitD` : a pointer to an already allocated BITv07_DStream_t structure.
+*   `srcSize` must be the *exact* size of the bitStream, in bytes.
+*   @return : size of stream (== srcSize) or an errorCode if a problem is detected
+*/
+MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
+{
+    if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
+
+    if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
+          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+    } else {
+        bitD->start = (const char*)srcBuffer;
+        bitD->ptr   = bitD->start;
+        bitD->bitContainer = *(const BYTE*)(bitD->start);
+        switch(srcSize)
+        {
+            case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
+            case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
+            case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
+            case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
+            case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
+            case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) <<  8; /* fall-through */
+            default: break;
+        }
+        { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
+          bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
+          if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
+        bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
+    }
+
+    return srcSize;
+}
+
+
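+/*! BITv07_lookBits() :
+ *  Provides the next nbBits from the local register, without consuming them.
+ *  (Editor's note) the double shift (first ">> 1", then ">> ((bitMask-nbBits) & bitMask)")
+ *  keeps every shift amount strictly below the register width, so the call
+ *  remains well-defined even when nbBits==0. */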
+MEM_STATIC size_t BITv07_lookBits(const BITv07_DStream_t* bitD, U32 nbBits)
+{
+    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
+}
+
+/*! BITv07_lookBitsFast() :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits)
+{
+    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
+    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
+}
+
+MEM_STATIC void BITv07_skipBits(BITv07_DStream_t* bitD, U32 nbBits)
+{
+    bitD->bitsConsumed += nbBits;
+}
+
+MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BITv07_lookBits(bitD, nbBits);
+    BITv07_skipBits(bitD, nbBits);
+    return value;
+}
+
+/*! BITv07_readBitsFast() :
+*   unsafe version; only works if nbBits >= 1 */
+MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits)
+{
+    size_t const value = BITv07_lookBitsFast(bitD, nbBits);
+    BITv07_skipBits(bitD, nbBits);
+    return value;
+}
+
+MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD)
+{
+    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should not happen => corruption detected */
+        return BITv07_DStream_overflow;
+
+    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
+        bitD->ptr -= bitD->bitsConsumed >> 3;
+        bitD->bitsConsumed &= 7;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);
+        return BITv07_DStream_unfinished;
+    }
+    if (bitD->ptr == bitD->start) {
+        if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv07_DStream_endOfBuffer;
+        return BITv07_DStream_completed;
+    }
+    {   U32 nbBytes = bitD->bitsConsumed >> 3;
+        BITv07_DStream_status result = BITv07_DStream_unfinished;
+        if (bitD->ptr - nbBytes < bitD->start) {
+            nbBytes = (U32)(bitD->ptr - bitD->start);  /* ptr > start */
+            result = BITv07_DStream_endOfBuffer;
+        }
+        bitD->ptr -= nbBytes;
+        bitD->bitsConsumed -= nbBytes*8;
+        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
+        return result;
+    }
+}
+
+/*! BITv07_endOfDStream() :
+*   @return Tells if DStream has exactly reached its end (all bits consumed).
+*/
+MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream)
+{
+    return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* BITSTREAM_H_MODULE */
+/* ******************************************************************
+   FSE : Finite State Entropy codec
+   Public Prototypes declaration
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef FSEv07_H
+#define FSEv07_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/*-****************************************
+*  FSE simple functions
+******************************************/
+
+/*! FSEv07_decompress():
+    Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated destination buffer 'dst', of size 'dstCapacity'.
+    @return : size of regenerated data (<= maxDstSize),
+              or an error code, which can be tested using FSEv07_isError() .
+
+    ** Important ** : FSEv07_decompress() does not decompress non-compressible or RLE data !!!
+    Why ? : making this distinction requires a header.
+    Header management is intentionally delegated to the user layer, which can better manage special cases.
+*/
+size_t FSEv07_decompress(void* dst,  size_t dstCapacity,
+                const void* cSrc, size_t cSrcSize);
+
+
+/* Error Management */
+unsigned    FSEv07_isError(size_t code);        /* tells if a return value is an error code */
+const char* FSEv07_getErrorName(size_t code);   /* provides error code string (useful for debugging) */
+
+
+/*-*****************************************
+*  FSE detailed API
+******************************************/
+/*!
+FSEv07_decompress() does the following:
+1. read normalized counters with readNCount()
+2. build decoding table 'DTable' from normalized counters
+3. decode the data stream using decoding table 'DTable'
+
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and provide normalized distribution using external method.
+*/
+
+
+/* *** DECOMPRESSION *** */
+
+/*! FSEv07_readNCount():
+    Read compactly saved 'normalizedCounter' from 'rBuffer'.
+    @return : size read from 'rBuffer',
+              or an errorCode, which can be tested using FSEv07_isError().
+              maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
+size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
+
+/*! Constructor and Destructor of FSEv07_DTable.
+    Note that its size depends on 'tableLog' */
+typedef unsigned FSEv07_DTable;   /* don't allocate that. It's just a way to be more restrictive than void* */
+FSEv07_DTable* FSEv07_createDTable(unsigned tableLog);
+void        FSEv07_freeDTable(FSEv07_DTable* dt);
+
+/*! FSEv07_buildDTable():
+    Builds 'dt', which must be already allocated, using FSEv07_createDTable().
+    return : 0, or an errorCode, which can be tested using FSEv07_isError() */
+size_t FSEv07_buildDTable (FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
+
+/*! FSEv07_decompress_usingDTable():
+    Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
+    into `dst` which must be already allocated.
+    @return : size of regenerated data (necessarily <= `dstCapacity`),
+              or an errorCode, which can be tested using FSEv07_isError() */
+size_t FSEv07_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv07_DTable* dt);
+
+/*!
+Tutorial :
+----------
+(Note : these functions only decompress FSE-compressed blocks.
+ If a block is uncompressed, use memcpy() instead.
+ If a block is a single repeated byte, use memset() instead.)
+
+The first step is to obtain the normalized frequencies of symbols.
+This can be performed by FSEv07_readNCount() if it was saved using FSEv07_writeNCount().
+'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
+or size the table to handle worst case situations (typically 256).
+FSEv07_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
+The result of FSEv07_readNCount() is the number of bytes read from 'rBuffer'.
+Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
+If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
+
+The next step is to build the decompression tables 'FSEv07_DTable' from 'normalizedCounter'.
+This is performed by the function FSEv07_buildDTable().
+The space required by 'FSEv07_DTable' must be already allocated using FSEv07_createDTable().
+If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
+
+`FSEv07_DTable` can then be used to decompress `cSrc`, with FSEv07_decompress_usingDTable().
+`cSrcSize` must be strictly correct, otherwise decompression will fail.
+FSEv07_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
+If there is an error, the function will return an error code, which can be tested using FSEv07_isError(). (ex: dst buffer too small)
+*/
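+
+/* The tutorial steps condensed into a sketch (editor's addition, not
+*  upstream); 256 cells cover the worst-case maxSymbolValue, and the
+*  FSE-compressed payload is assumed to follow the saved NCount directly :
+*
+*      short ncount[256];
+*      unsigned maxSymbol = 255, tableLog;
+*      size_t const hSize = FSEv07_readNCount(ncount, &maxSymbol, &tableLog, cSrc, cSrcSize);
+*      if (!FSEv07_isError(hSize)) {
+*          FSEv07_DTable* const dt = FSEv07_createDTable(tableLog);
+*          FSEv07_buildDTable(dt, ncount, maxSymbol, tableLog);
+*          {   size_t const dSize = FSEv07_decompress_usingDTable(dst, dstCapacity,
+*                                        (const char*)cSrc + hSize, cSrcSize - hSize, dt);
+*              // dSize = regenerated bytes, or an error code (test with FSEv07_isError)
+*          }
+*          FSEv07_freeDTable(dt);
+*      }
+*/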
+
+
+#ifdef FSEv07_STATIC_LINKING_ONLY
+
+
+/* *****************************************
+*  Static allocation
+*******************************************/
+/* FSE buffer bounds */
+#define FSEv07_NCOUNTBOUND 512
+#define FSEv07_BLOCKBOUND(size) (size + (size>>7))
+
+/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
+#define FSEv07_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
+
+
+/* *****************************************
+*  FSE advanced API
+*******************************************/
+size_t FSEv07_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
+/**< same as FSEv07_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr  */
+
+unsigned FSEv07_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
+/**< same as FSEv07_optimalTableLog(), which uses `minus==2` */
+
+size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits);
+/**< build a fake FSEv07_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
+
+size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, unsigned char symbolValue);
+/**< build a fake FSEv07_DTable, designed to always generate the same symbolValue */
+
+
+
+/* *****************************************
+*  FSE symbol decompression API
+*******************************************/
+typedef struct
+{
+    size_t      state;
+    const void* table;   /* precise table may vary, depending on U16 */
+} FSEv07_DState_t;
+
+
+static void     FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt);
+
+static unsigned char FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
+
+
+
+/* *****************************************
+*  FSE unsafe API
+*******************************************/
+static unsigned char FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
+/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
+
+
+/* ======    Decompression    ====== */
+
+typedef struct {
+    U16 tableLog;
+    U16 fastMode;
+} FSEv07_DTableHeader;   /* sizeof U32 */
+
+typedef struct
+{
+    unsigned short newState;
+    unsigned char  symbol;
+    unsigned char  nbBits;
+} FSEv07_decode_t;   /* size == U32 */
+
+MEM_STATIC void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSEv07_DTableHeader* const DTableH = (const FSEv07_DTableHeader*)ptr;
+    DStatePtr->state = BITv07_readBits(bitD, DTableH->tableLog);
+    BITv07_reloadDStream(bitD);
+    DStatePtr->table = dt + 1;
+}
+
+MEM_STATIC BYTE FSEv07_peekSymbol(const FSEv07_DState_t* DStatePtr)
+{
+    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    return DInfo.symbol;
+}
+
+MEM_STATIC void FSEv07_updateState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
+{
+    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    size_t const lowBits = BITv07_readBits(bitD, nbBits);
+    DStatePtr->state = DInfo.newState + lowBits;
+}
+
+MEM_STATIC BYTE FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
+{
+    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BITv07_readBits(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+/*! FSEv07_decodeSymbolFast() :
+    unsafe, only works if no symbol has a probability > 50% */
+MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
+{
+    FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
+    U32 const nbBits = DInfo.nbBits;
+    BYTE const symbol = DInfo.symbol;
+    size_t const lowBits = BITv07_readBitsFast(bitD, nbBits);
+
+    DStatePtr->state = DInfo.newState + lowBits;
+    return symbol;
+}
+
+
+
+#ifndef FSEv07_COMMONDEFS_ONLY
+
+/* **************************************************************
+*  Tuning parameters
+****************************************************************/
+/*!MEMORY_USAGE :
+*  Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+*  Increasing memory usage improves compression ratio
+*  Reduced memory usage can improve speed, due to cache effect
+*  Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
+#define FSEv07_MAX_MEMORY_USAGE 14
+#define FSEv07_DEFAULT_MEMORY_USAGE 13
+
+/*!FSEv07_MAX_SYMBOL_VALUE :
+*  Maximum symbol value authorized.
+*  Required for proper stack allocation */
+#define FSEv07_MAX_SYMBOL_VALUE 255
+
+
+/* **************************************************************
+*  template functions type & suffix
+****************************************************************/
+#define FSEv07_FUNCTION_TYPE BYTE
+#define FSEv07_FUNCTION_EXTENSION
+#define FSEv07_DECODE_TYPE FSEv07_decode_t
+
+
+#endif   /* !FSEv07_COMMONDEFS_ONLY */
+
+
+/* ***************************************************************
+*  Constants
+*****************************************************************/
+#define FSEv07_MAX_TABLELOG  (FSEv07_MAX_MEMORY_USAGE-2)
+#define FSEv07_MAX_TABLESIZE (1U<<FSEv07_MAX_TABLELOG)
+#define FSEv07_MAXTABLESIZE_MASK (FSEv07_MAX_TABLESIZE-1)
+#define FSEv07_DEFAULT_TABLELOG (FSEv07_DEFAULT_MEMORY_USAGE-2)
+#define FSEv07_MIN_TABLELOG 5
+
+#define FSEv07_TABLELOG_ABSOLUTE_MAX 15
+#if FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX
+#  error "FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX is not supported"
+#endif
+
+#define FSEv07_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
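+/* Note (added for clarity) : for valid table sizes (tableLog >= FSEv07_MIN_TABLELOG) this
+   step is odd, hence coprime with the power-of-two tableSize, so the symbol-spreading walk
+   in FSEv07_buildDTable() visits every cell exactly once (checked by its `position!=0` test). */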
+
+
+#endif /* FSEv07_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* FSEv07_H */
+/* ******************************************************************
+   Huffman coder, part of New Generation Entropy library
+   header file
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   You can contact the author at :
+   - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+****************************************************************** */
+#ifndef HUFv07_H_298734234
+#define HUFv07_H_298734234
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+
+/* *** simple functions *** */
+/**
+HUFv07_decompress() :
+    Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
+    into already allocated buffer 'dst', of minimum size 'dstSize'.
+    `dstSize` : **must** be the ***exact*** size of original (uncompressed) data.
+    Note : in contrast with FSE, HUFv07_decompress can regenerate
+           RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
+           because it knows the size to regenerate.
+    @return : size of regenerated data (== dstSize),
+              or an error code, which can be tested using HUFv07_isError()
+*/
+size_t HUFv07_decompress(void* dst,  size_t dstSize,
+                const void* cSrc, size_t cSrcSize);
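+
+/* Illustrative usage sketch (an addition for clarity, not part of the original source).
+   `origSize` must be the exact uncompressed size, e.g. stored alongside the blob:
+
+       size_t const r = HUFv07_decompress(dst, origSize, cSrc, cSrcSize);
+       if (HUFv07_isError(r)) { ... }   // corruption, dst too small, etc.
+       // on success, r == origSize
+*/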
+
+
+/* ****************************************
+*  Tool functions
+******************************************/
+#define HUFv07_BLOCKSIZE_MAX (128 * 1024)
+
+/* Error Management */
+unsigned    HUFv07_isError(size_t code);        /**< tells if a return value is an error code */
+const char* HUFv07_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */
+
+
+/* *** Advanced function *** */
+
+
+#ifdef HUFv07_STATIC_LINKING_ONLY
+
+
+/* *** Constants *** */
+#define HUFv07_TABLELOG_ABSOLUTEMAX  16   /* absolute limit of HUFv07_TABLELOG_MAX. Beyond that value, code does not work */
+#define HUFv07_TABLELOG_MAX  12           /* max configured tableLog (for static allocation); can be modified up to HUFv07_TABLELOG_ABSOLUTEMAX */
+#define HUFv07_TABLELOG_DEFAULT  11       /* tableLog by default, when not specified */
+#define HUFv07_SYMBOLVALUE_MAX 255
+#if (HUFv07_TABLELOG_MAX > HUFv07_TABLELOG_ABSOLUTEMAX)
+#  error "HUFv07_TABLELOG_MAX is too large!"
+#endif
+
+
+/* ****************************************
+*  Static allocation
+******************************************/
+/* HUF buffer bounds */
+#define HUFv07_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible data is pre-filtered with a fast heuristic */
+
+/* static allocation of HUF's DTable */
+typedef U32 HUFv07_DTable;
+#define HUFv07_DTABLE_SIZE(maxTableLog)   (1 + (1<<(maxTableLog)))
+#define HUFv07_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+        HUFv07_DTable DTable[HUFv07_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) }
+#define HUFv07_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
+        HUFv07_DTable DTable[HUFv07_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) }
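+
+/* Illustrative sketch (an example added for clarity) of a stack-allocated single-symbol
+   DTable, mirroring what HUFv07_decompress1X2() does further below:
+
+       HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
+       size_t const r = HUFv07_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+       if (HUFv07_isError(r)) { ... }
+*/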
+
+
+/* ****************************************
+*  Advanced decompression functions
+******************************************/
+size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< decodes RLE and uncompressed */
+size_t HUFv07_decompress4X_hufOnly(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
+size_t HUFv07_decompress4X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUFv07_decompress4X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+size_t HUFv07_decompress1X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< single-symbol decoder */
+size_t HUFv07_decompress1X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /**< double-symbols decoder */
+
+
+/* ****************************************
+*  HUF detailed API
+******************************************/
+/*!
+The following API allows targeting specific sub-functions for advanced tasks.
+For example, it's possible to compress several blocks using the same 'CTable',
+or to save and regenerate 'CTable' using external methods.
+*/
+/* FSEv07_count() : find it within "fse.h" */
+
+/*! HUFv07_readStats() :
+    Read compact Huffman tree, saved by HUFv07_writeCTable().
+    `huffWeight` is destination buffer.
+    @return : size read from `src`, or an error code.
+    Note : needed by HUFv07_readCTable() and HUFv07_readDTableXn(). */
+size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize);
+
+
+/*
+HUFv07_decompress() does the following:
+1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
+2. build the Huffman table from the saved header, using HUFv07_readDTableXn()
+3. decode 1 or 4 segments in parallel using HUFv07_decompress{1,4}X{2,4}_usingDTable()
+*/
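+
+/* Illustrative sketch of the same pipeline driven manually (an example added for
+   clarity), using the advanced entry points declared below:
+
+       HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
+       U32 const algo = HUFv07_selectDecoder(dstSize, cSrcSize);   // 0:X2, 1:X4
+       size_t const hSize = algo ? HUFv07_readDTableX4(DTable, cSrc, cSrcSize)
+                                 : HUFv07_readDTableX2(DTable, cSrc, cSrcSize);
+       // (check hSize with HUFv07_isError(), then decode the remaining bytes:)
+       // HUFv07_decompress4X_usingDTable(dst, dstSize, (const BYTE*)cSrc+hSize, cSrcSize-hSize, DTable);
+*/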
+
+/** HUFv07_selectDecoder() :
+*   Tells which decoder is likely to decode faster,
+*   based on a set of pre-determined metrics.
+*   @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
+*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
+size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
+
+size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+
+
+/* single stream variants */
+size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
+size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* double-symbol decoder */
+
+size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
+
+
+#endif /* HUFv07_STATIC_LINKING_ONLY */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* HUFv07_H_298734234 */
+/*
+   Common functions of New Generation Entropy library
+   Copyright (C) 2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+*************************************************************************** */
+
+
+
+/*-****************************************
+*  FSE Error Management
+******************************************/
+unsigned FSEv07_isError(size_t code) { return ERR_isError(code); }
+
+const char* FSEv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/* **************************************************************
+*  HUF Error Management
+****************************************************************/
+unsigned HUFv07_isError(size_t code) { return ERR_isError(code); }
+
+const char* HUFv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+/*-**************************************************************
+*  FSE NCount encoding-decoding
+****************************************************************/
+static short FSEv07_abs(short a) { return (short)(a<0 ? -a : a); }
+
+size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
+                 const void* headerBuffer, size_t hbSize)
+{
+    const BYTE* const istart = (const BYTE*) headerBuffer;
+    const BYTE* const iend = istart + hbSize;
+    const BYTE* ip = istart;
+    int nbBits;
+    int remaining;
+    int threshold;
+    U32 bitStream;
+    int bitCount;
+    unsigned charnum = 0;
+    int previous0 = 0;
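+
+    /* Note on the header layout parsed below (added for clarity) : the first 4 bits
+       store (tableLog - FSEv07_MIN_TABLELOG); counts then follow as variable-width
+       fields whose width shrinks as the remaining probability mass is consumed, and a
+       count of 0 switches to a 2-bit repeat mode encoding runs of zero-probability symbols. */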
+
+    if (hbSize < 4) return ERROR(srcSize_wrong);
+    bitStream = MEM_readLE32(ip);
+    nbBits = (bitStream & 0xF) + FSEv07_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSEv07_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) && (charnum<=*maxSVPtr)) {
+        if (previous0) {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF) {
+                n0+=24;
+                if (ip < iend-5) {
+                    ip+=2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                } else {
+                    bitStream >>= 16;
+                    bitCount+=16;
+            }   }
+            while ((bitStream & 3) == 3) {
+                n0+=3;
+                bitStream>>=2;
+                bitCount+=2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            }
+            else
+                bitStream >>= 2;
+        }
+        {   short const max = (short)((2*threshold-1)-remaining);
+            short count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = (short)(bitStream & (threshold-1));
+                bitCount   += nbBits-1;
+            } else {
+                count = (short)(bitStream & (2*threshold-1));
+                if (count >= threshold) count -= max;
+                bitCount   += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= FSEv07_abs(count);
+            normalizedCounter[charnum++] = count;
+            previous0 = !count;
+            while (remaining < threshold) {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+    }   }   /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
+    if (remaining != 1) return ERROR(GENERIC);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
+    return ip-istart;
+}
+
+
+/*! HUFv07_readStats() :
+    Read compact Huffman tree, saved by HUFv07_writeCTable().
+    `huffWeight` is destination buffer.
+    @return : size read from `src`, or an error code.
+    Note : needed by HUFv07_readCTable() and HUFv07_readDTableXn().
+*/
+size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
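+
+    /* Note on the header byte iSize, as handled below (added for clarity) : values >= 242
+       select an RLE shortcut, 128..241 mean (iSize-127) raw weights packed as 4-bit pairs,
+       and values < 128 give the size of an FSE-compressed weight stream. */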
+
+    if (iSize >= 128)  { /* special header */
+        if (iSize >= (242)) {  /* RLE */
+            static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
+            oSize = l[iSize-242];
+            memset(huffWeight, 1, hwSize);
+            iSize = 0;
+        }
+        else {   /* Incompressible */
+            oSize = iSize - 127;
+            iSize = ((oSize+1)/2);
+            if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+            if (oSize >= hwSize) return ERROR(corruption_detected);
+            ip += 1;
+            {   U32 n;
+                for (n=0; n<oSize; n+=2) {
+                    huffWeight[n]   = ip[n/2] >> 4;
+                    huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }   }
+    else  {   /* header compressed with FSE (normal case) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSEv07_decompress(huffWeight, hwSize-1, ip+1, iSize);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSEv07_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUFv07_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));
+    weightTotal = 0;
+    {   U32 n; for (n=0; n<oSize; n++) {
+            if (huffWeight[n] >= HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
+            rankStats[huffWeight[n]]++;
+            weightTotal += (1 << huffWeight[n]) >> 1;
+    }   }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    {   U32 const tableLog = BITv07_highbit32(weightTotal) + 1;
+        if (tableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
+        *tableLogPtr = tableLog;
+        /* determine last weight */
+        {   U32 const total = 1 << tableLog;
+            U32 const rest = total - weightTotal;
+            U32 const verif = 1 << BITv07_highbit32(rest);
+            U32 const lastWeight = BITv07_highbit32(rest) + 1;
+            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
+            huffWeight[oSize] = (BYTE)lastWeight;
+            rankStats[lastWeight]++;
+    }   }
+
+    /* check tree construction validity */
+    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */
+
+    /* results */
+    *nbSymbolsPtr = (U32)(oSize+1);
+    return iSize+1;
+}
+/* ******************************************************************
+   FSE : Finite State Entropy decoder
+   Copyright (C) 2013-2015, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  define FORCE_INLINE static __forceinline
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4214)        /* disable: C4214: non-int bitfields */
+#else
+#  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#    ifdef __GNUC__
+#      define FORCE_INLINE static inline __attribute__((always_inline))
+#    else
+#      define FORCE_INLINE static inline
+#    endif
+#  else
+#    define FORCE_INLINE static
+#  endif /* __STDC_VERSION__ */
+#endif
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define FSEv07_isError ERR_isError
+#define FSEv07_STATIC_ASSERT(c) { enum { FSEv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/* **************************************************************
+*  Complex types
+****************************************************************/
+typedef U32 DTable_max_t[FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG)];
+
+
+/* **************************************************************
+*  Templates
+****************************************************************/
+/*
+  designed to be included,
+  to generate type-specific functions (template emulation in C).
+  The objective is to write these functions only once, for improved maintenance.
+*/
+
+/* safety checks */
+#ifndef FSEv07_FUNCTION_EXTENSION
+#  error "FSEv07_FUNCTION_EXTENSION must be defined"
+#endif
+#ifndef FSEv07_FUNCTION_TYPE
+#  error "FSEv07_FUNCTION_TYPE must be defined"
+#endif
+
+/* Function names */
+#define FSEv07_CAT(X,Y) X##Y
+#define FSEv07_FUNCTION_NAME(X,Y) FSEv07_CAT(X,Y)
+#define FSEv07_TYPE_NAME(X,Y) FSEv07_CAT(X,Y)
+
+
+/* Function templates */
+FSEv07_DTable* FSEv07_createDTable (unsigned tableLog)
+{
+    if (tableLog > FSEv07_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv07_TABLELOG_ABSOLUTE_MAX;
+    return (FSEv07_DTable*)malloc( FSEv07_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+}
+
+void FSEv07_freeDTable (FSEv07_DTable* dt)
+{
+    free(dt);
+}
+
+size_t FSEv07_buildDTable(FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+{
+    void* const tdPtr = dt+1;   /* skip the header : *dt is unsigned, 32 bits, aligned on 32 bits */
+    FSEv07_DECODE_TYPE* const tableDecode = (FSEv07_DECODE_TYPE*) (tdPtr);
+    U16 symbolNext[FSEv07_MAX_SYMBOL_VALUE+1];
+
+    U32 const maxSV1 = maxSymbolValue + 1;
+    U32 const tableSize = 1 << tableLog;
+    U32 highThreshold = tableSize-1;
+
+    /* Sanity Checks */
+    if (maxSymbolValue > FSEv07_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+    if (tableLog > FSEv07_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+    /* Init, lay down lowprob symbols */
+    {   FSEv07_DTableHeader DTableH;
+        DTableH.tableLog = (U16)tableLog;
+        DTableH.fastMode = 1;
+        {   S16 const largeLimit= (S16)(1 << (tableLog-1));
+            U32 s;
+            for (s=0; s<maxSV1; s++) {
+                if (normalizedCounter[s]==-1) {
+                    tableDecode[highThreshold--].symbol = (FSEv07_FUNCTION_TYPE)s;
+                    symbolNext[s] = 1;
+                } else {
+                    if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+                    symbolNext[s] = normalizedCounter[s];
+        }   }   }
+        memcpy(dt, &DTableH, sizeof(DTableH));
+    }
+
+    /* Spread symbols */
+    {   U32 const tableMask = tableSize-1;
+        U32 const step = FSEv07_TABLESTEP(tableSize);
+        U32 s, position = 0;
+        for (s=0; s<maxSV1; s++) {
+            int i;
+            for (i=0; i<normalizedCounter[s]; i++) {
+                tableDecode[position].symbol = (FSEv07_FUNCTION_TYPE)s;
+                position = (position + step) & tableMask;
+                while (position > highThreshold) position = (position + step) & tableMask;   /* lowprob area */
+        }   }
+
+        if (position!=0) return ERROR(GENERIC);   /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+    }
+
+    /* Build Decoding table */
+    {   U32 u;
+        for (u=0; u<tableSize; u++) {
+            FSEv07_FUNCTION_TYPE const symbol = (FSEv07_FUNCTION_TYPE)(tableDecode[u].symbol);
+            U16 nextState = symbolNext[symbol]++;
+            tableDecode[u].nbBits = (BYTE) (tableLog - BITv07_highbit32 ((U32)nextState) );
+            tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+    }   }
+
+    return 0;
+}
+
+
+
+#ifndef FSEv07_COMMONDEFS_ONLY
+
+/*-*******************************************************
+*  Decompression (Byte symbols)
+*********************************************************/
+size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, BYTE symbolValue)
+{
+    void* ptr = dt;
+    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSEv07_decode_t* const cell = (FSEv07_decode_t*)dPtr;
+
+    DTableH->tableLog = 0;
+    DTableH->fastMode = 0;
+
+    cell->newState = 0;
+    cell->symbol = symbolValue;
+    cell->nbBits = 0;
+
+    return 0;
+}
+
+
+size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits)
+{
+    void* ptr = dt;
+    FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
+    void* dPtr = dt + 1;
+    FSEv07_decode_t* const dinfo = (FSEv07_decode_t*)dPtr;
+    const unsigned tableSize = 1 << nbBits;
+    const unsigned tableMask = tableSize - 1;
+    const unsigned maxSV1 = tableMask+1;
+    unsigned s;
+
+    /* Sanity checks */
+    if (nbBits < 1) return ERROR(GENERIC);         /* min size */
+
+    /* Build Decoding Table */
+    DTableH->tableLog = (U16)nbBits;
+    DTableH->fastMode = 1;
+    for (s=0; s<maxSV1; s++) {
+        dinfo[s].newState = 0;
+        dinfo[s].symbol = (BYTE)s;
+        dinfo[s].nbBits = (BYTE)nbBits;
+    }
+
+    return 0;
+}
+
+FORCE_INLINE size_t FSEv07_decompress_usingDTable_generic(
+          void* dst, size_t maxDstSize,
+    const void* cSrc, size_t cSrcSize,
+    const FSEv07_DTable* dt, const unsigned fast)
+{
+    BYTE* const ostart = (BYTE*) dst;
+    BYTE* op = ostart;
+    BYTE* const omax = op + maxDstSize;
+    BYTE* const olimit = omax-3;
+
+    BITv07_DStream_t bitD;
+    FSEv07_DState_t state1;
+    FSEv07_DState_t state2;
+
+    /* Init */
+    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);   /* last arg replaced by maxCompressedSize */
+      if (FSEv07_isError(errorCode)) return errorCode; }
+
+    FSEv07_initDState(&state1, &bitD, dt);
+    FSEv07_initDState(&state2, &bitD, dt);
+
+#define FSEv07_GETSYMBOL(statePtr) fast ? FSEv07_decodeSymbolFast(statePtr, &bitD) : FSEv07_decodeSymbol(statePtr, &bitD)
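+
+    /* Note (added for clarity) : two FSE states are interleaved over one bitstream, so
+       successive symbol decodes don't serialize on a single state's dependency chain. */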
+
+    /* 4 symbols per loop */
+    for ( ; (BITv07_reloadDStream(&bitD)==BITv07_DStream_unfinished) && (op<olimit) ; op+=4) {
+        op[0] = FSEv07_GETSYMBOL(&state1);
+
+        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BITv07_reloadDStream(&bitD);
+
+        op[1] = FSEv07_GETSYMBOL(&state2);
+
+        if (FSEv07_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            { if (BITv07_reloadDStream(&bitD) > BITv07_DStream_unfinished) { op+=2; break; } }
+
+        op[2] = FSEv07_GETSYMBOL(&state1);
+
+        if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8)    /* This test must be static */
+            BITv07_reloadDStream(&bitD);
+
+        op[3] = FSEv07_GETSYMBOL(&state2);
+    }
+
+    /* tail */
+    /* note : BITv07_reloadDStream(&bitD) >= BITv07_DStream_partiallyFilled; ends at exactly BITv07_DStream_completed */
+    while (1) {
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+        *op++ = FSEv07_GETSYMBOL(&state1);
+
+        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
+            *op++ = FSEv07_GETSYMBOL(&state2);
+            break;
+        }
+
+        if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+
+        *op++ = FSEv07_GETSYMBOL(&state2);
+
+        if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
+            *op++ = FSEv07_GETSYMBOL(&state1);
+            break;
+    }   }
+
+    return op-ostart;
+}
+
+
+size_t FSEv07_decompress_usingDTable(void* dst, size_t originalSize,
+                            const void* cSrc, size_t cSrcSize,
+                            const FSEv07_DTable* dt)
+{
+    const void* ptr = dt;
+    const FSEv07_DTableHeader* DTableH = (const FSEv07_DTableHeader*)ptr;
+    const U32 fastMode = DTableH->fastMode;
+
+    /* select fast mode (static) */
+    if (fastMode) return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
+    return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
+}
+
+
+size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* const istart = (const BYTE*)cSrc;
+    const BYTE* ip = istart;
+    short counting[FSEv07_MAX_SYMBOL_VALUE+1];
+    DTable_max_t dt;   /* Static analyzer seems unable to understand that this table will be properly initialized later */
+    unsigned tableLog;
+    unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;
+
+    if (cSrcSize<2) return ERROR(srcSize_wrong);   /* too small input size */
+
+    /* normal FSE decoding mode */
+    {   size_t const NCountLength = FSEv07_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
+        if (FSEv07_isError(NCountLength)) return NCountLength;
+        if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong);   /* too small input size */
+        ip += NCountLength;
+        cSrcSize -= NCountLength;
+    }
+
+    { size_t const errorCode = FSEv07_buildDTable (dt, counting, maxSymbolValue, tableLog);
+      if (FSEv07_isError(errorCode)) return errorCode; }
+
+    return FSEv07_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);   /* always return, even if it is an error code */
+}
+
+
+
+#endif   /* FSEv07_COMMONDEFS_ONLY */
+
+/* ******************************************************************
+   Huffman decoder, part of New Generation Entropy library
+   Copyright (C) 2013-2016, Yann Collet.
+
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+       * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following disclaimer
+   in the documentation and/or other materials provided with the
+   distribution.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
+    - Public forum : https://groups.google.com/forum/#!forum/lz4c
+****************************************************************** */
+
+/* **************************************************************
+*  Compiler specifics
+****************************************************************/
+#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+/* inline is defined */
+#elif defined(_MSC_VER)
+#  define inline __inline
+#else
+#  define inline /* disable inline */
+#endif
+
+
+#ifdef _MSC_VER    /* Visual Studio */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#endif
+
+
+
+/* **************************************************************
+*  Error Management
+****************************************************************/
+#define HUFv07_STATIC_ASSERT(c) { enum { HUFv07_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+
+
+/*-***************************/
+/*  generic DTableDesc       */
+/*-***************************/
+
+typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
+
+static DTableDesc HUFv07_getDTableDesc(const HUFv07_DTable* table)
+{
+    DTableDesc dtd;
+    memcpy(&dtd, table, sizeof(dtd));
+    return dtd;
+}
+
+
+/*-***************************/
+/*  single-symbol decoding   */
+/*-***************************/
+
+typedef struct { BYTE byte; BYTE nbBits; } HUFv07_DEltX2;   /* single-symbol decoding */
+
+size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
+{
+    BYTE huffWeight[HUFv07_SYMBOLVALUE_MAX + 1];
+    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
+    U32 tableLog = 0;
+    U32 nbSymbols = 0;
+    size_t iSize;
+    void* const dtPtr = DTable + 1;
+    HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr;
+
+    HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable));
+    //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
+    if (HUFv07_isError(iSize)) return iSize;
+
+    /* Table header */
+    {   DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+        if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge);   /* DTable too small, the Huffman tree cannot fit */
+        dtd.tableType = 0;
+        dtd.tableLog = (BYTE)tableLog;
+        memcpy(DTable, &dtd, sizeof(dtd));
+    }
+
+    /* Prepare ranks */
+    {   U32 n, nextRankStart = 0;
+        for (n=1; n<tableLog+1; n++) {
+            U32 current = nextRankStart;
+            nextRankStart += (rankVal[n] << (n-1));
+            rankVal[n] = current;
+    }   }
+
+    /* fill DTable */
+    {   U32 n;
+        for (n=0; n<nbSymbols; n++) {
+            U32 const w = huffWeight[n];
+            U32 const length = (1 << w) >> 1;
+            U32 i;
+            HUFv07_DEltX2 D;
+            D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
+            for (i = rankVal[w]; i < rankVal[w] + length; i++)
+                dt[i] = D;
+            rankVal[w] += length;
+    }   }
+
+    return iSize;
+}
+
+
+static BYTE HUFv07_decodeSymbolX2(BITv07_DStream_t* Dstream, const HUFv07_DEltX2* dt, const U32 dtLog)
+{
+    size_t const val = BITv07_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
+    BYTE const c = dt[val].byte;
+    BITv07_skipBits(Dstream, dt[val].nbBits);
+    return c;
+}
+
+#define HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
+    *ptr++ = HUFv07_decodeSymbolX2(DStreamPtr, dt, dtLog)
+
+#define HUFv07_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
+        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
+
+#define HUFv07_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
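+
+/* Note (added for clarity) : a reload guarantees roughly (container bits - 7) usable bits,
+   i.e. ~57 on 64-bit targets and ~25 on 32-bit ones. The _2 variant therefore decodes only
+   on 64-bit builds, while _1 also decodes on 32-bit because 2*HUFv07_TABLELOG_MAX (24)
+   still fits; _0 always decodes. */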
+
+static inline size_t HUFv07_decodeStreamX2(BYTE* p, BITv07_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv07_DEltX2* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 4 symbols at a time */
+    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-4)) {
+        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUFv07_DECODE_SYMBOLX2_1(p, bitDPtr);
+        HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
+        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
+    }
+
+    /* closer to the end */
+    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd))
+        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    /* no more data to retrieve from bitstream, hence no need to reload */
+    while (p < pEnd)
+        HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
+
+    return pEnd-pStart;
+}
+
+static size_t HUFv07_decompress1X2_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + dstSize;
+    const void* dtPtr = DTable + 1;
+    const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
+    BITv07_DStream_t bitD;
+    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+    U32 const dtLog = dtd.tableLog;
+
+    { size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
+      if (HUFv07_isError(errorCode)) return errorCode; }
+
+    HUFv07_decodeStreamX2(op, &bitD, oend, dt, dtLog);
+
+    /* check */
+    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    return dstSize;
+}
+
+size_t HUFv07_decompress1X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUFv07_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUFv07_decompress1X2_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUFv07_readDTableX2 (DCtx, cSrc, cSrcSize);
+    if (HUFv07_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUFv07_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
+    return HUFv07_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+static size_t HUFv07_decompress4X2_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    /* Check */
+    if (cSrcSize < 10) return ERROR(corruption_detected);  /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable + 1;
+        const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
+
+        /* Init */
+        BITv07_DStream_t bitD1;
+        BITv07_DStream_t bitD2;
+        BITv07_DStream_t bitD3;
+        BITv07_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        const size_t segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+        for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv07_DECODE_SYMBOLX2_1(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX2_1(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX2_1(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX2_1(op4, &bitD4);
+            HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
+            HUFv07_DECODE_SYMBOLX2_0(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX2_0(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX2_0(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX2_0(op4, &bitD4);
+            endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 is assumed already verified within the main loop */
+
+        /* finish bitStreams one by one */
+        HUFv07_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv07_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv07_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv07_decodeStreamX2(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        endSignal = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
+        if (!endSignal) return ERROR(corruption_detected);
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUFv07_decompress4X2_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+    if (dtd.tableType != 0) return ERROR(GENERIC);
+    return HUFv07_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+
+size_t HUFv07_decompress4X2_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUFv07_readDTableX2 (dctx, cSrc, cSrcSize);
+    if (HUFv07_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUFv07_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
+}
+
+size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
+    return HUFv07_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* *************************/
+/* double-symbols decoding */
+/* *************************/
+typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv07_DEltX4;  /* double-symbols decoding */
+
+typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
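+
+/* Note (added for clarity) : in an X4 entry, `sequence` packs one or two decoded bytes
+   (written little-endian), `length` says how many of them are valid (1 or 2), and
+   `nbBits` is the total number of bits consumed; see HUFv07_decodeSymbolX4() below. */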
+
+static void HUFv07_fillDTableX4Level2(HUFv07_DEltX4* DTable, U32 sizeLog, const U32 consumed,
+                           const U32* rankValOrigin, const int minWeight,
+                           const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
+                           U32 nbBitsBaseline, U16 baseSeq)
+{
+    HUFv07_DEltX4 DElt;
+    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
+
+    /* get pre-calculated rankVal */
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill skipped values */
+    if (minWeight>1) {
+        U32 i, skipSize = rankVal[minWeight];
+        MEM_writeLE16(&(DElt.sequence), baseSeq);
+        DElt.nbBits   = (BYTE)(consumed);
+        DElt.length   = 1;
+        for (i = 0; i < skipSize; i++)
+            DTable[i] = DElt;
+    }
+
+    /* fill DTable */
+    { U32 s; for (s=0; s<sortedListSize; s++) {   /* note : sortedSymbols already skipped */
+        const U32 symbol = sortedSymbols[s].symbol;
+        const U32 weight = sortedSymbols[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 length = 1 << (sizeLog-nbBits);
+        const U32 start = rankVal[weight];
+        U32 i = start;
+        const U32 end = start + length;
+
+        MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
+        DElt.nbBits = (BYTE)(nbBits + consumed);
+        DElt.length = 2;
+        do { DTable[i++] = DElt; } while (i<end);   /* since length >= 1 */
+
+        rankVal[weight] += length;
+    }}
+}
+
+typedef U32 rankVal_t[HUFv07_TABLELOG_ABSOLUTEMAX][HUFv07_TABLELOG_ABSOLUTEMAX + 1];
+
+static void HUFv07_fillDTableX4(HUFv07_DEltX4* DTable, const U32 targetLog,
+                           const sortedSymbol_t* sortedList, const U32 sortedListSize,
+                           const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
+                           const U32 nbBitsBaseline)
+{
+    U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
+    const int scaleLog = nbBitsBaseline - targetLog;   /* note : targetLog >= srcLog, hence scaleLog <= 1 */
+    const U32 minBits  = nbBitsBaseline - maxWeight;
+    U32 s;
+
+    memcpy(rankVal, rankValOrigin, sizeof(rankVal));
+
+    /* fill DTable */
+    for (s=0; s<sortedListSize; s++) {
+        const U16 symbol = sortedList[s].symbol;
+        const U32 weight = sortedList[s].weight;
+        const U32 nbBits = nbBitsBaseline - weight;
+        const U32 start = rankVal[weight];
+        const U32 length = 1 << (targetLog-nbBits);
+
+        if (targetLog-nbBits >= minBits) {   /* enough room for a second symbol */
+            U32 sortedRank;
+            int minWeight = nbBits + scaleLog;
+            if (minWeight < 1) minWeight = 1;
+            sortedRank = rankStart[minWeight];
+            HUFv07_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
+                           rankValOrigin[nbBits], minWeight,
+                           sortedList+sortedRank, sortedListSize-sortedRank,
+                           nbBitsBaseline, symbol);
+        } else {
+            HUFv07_DEltX4 DElt;
+            MEM_writeLE16(&(DElt.sequence), symbol);
+            DElt.nbBits = (BYTE)(nbBits);
+            DElt.length = 1;
+            {   U32 u;
+                const U32 end = start + length;
+                for (u = start; u < end; u++) DTable[u] = DElt;
+        }   }
+        rankVal[weight] += length;
+    }
+}
+
+size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
+{
+    BYTE weightList[HUFv07_SYMBOLVALUE_MAX + 1];
+    sortedSymbol_t sortedSymbol[HUFv07_SYMBOLVALUE_MAX + 1];
+    U32 rankStats[HUFv07_TABLELOG_ABSOLUTEMAX + 1] = { 0 };
+    U32 rankStart0[HUFv07_TABLELOG_ABSOLUTEMAX + 2] = { 0 };
+    U32* const rankStart = rankStart0+1;
+    rankVal_t rankVal;
+    U32 tableLog, maxW, sizeOfSort, nbSymbols;
+    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+    U32 const maxTableLog = dtd.maxTableLog;
+    size_t iSize;
+    void* dtPtr = DTable+1;   /* force compiler to avoid strict-aliasing */
+    HUFv07_DEltX4* const dt = (HUFv07_DEltX4*)dtPtr;
+
+    HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable));   /* if compilation fails here, assertion is false */
+    if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge);
+    //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
+
+    iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
+    if (HUFv07_isError(iSize)) return iSize;
+
+    /* check result */
+    if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge);   /* DTable can't fit code depth */
+
+    /* find maxWeight */
+    for (maxW = tableLog; rankStats[maxW]==0; maxW--) {}  /* necessarily finds a solution before 0 */
+
+    /* Get start index of each weight */
+    {   U32 w, nextRankStart = 0;
+        for (w=1; w<maxW+1; w++) {
+            U32 current = nextRankStart;
+            nextRankStart += rankStats[w];
+            rankStart[w] = current;
+        }
+        rankStart[0] = nextRankStart;   /* put all 0w symbols at the end of the sorted list */
+        sizeOfSort = nextRankStart;
+    }
+
+    /* sort symbols by weight */
+    {   U32 s;
+        for (s=0; s<nbSymbols; s++) {
+            U32 const w = weightList[s];
+            U32 const r = rankStart[w]++;
+            sortedSymbol[r].symbol = (BYTE)s;
+            sortedSymbol[r].weight = (BYTE)w;
+        }
+        rankStart[0] = 0;   /* forget 0w symbols; this is the beginning of weight(1) */
+    }
+
+    /* Build rankVal */
+    {   U32* const rankVal0 = rankVal[0];
+        {   int const rescale = (maxTableLog-tableLog) - 1;   /* tableLog <= maxTableLog */
+            U32 nextRankVal = 0;
+            U32 w;
+            for (w=1; w<maxW+1; w++) {
+                U32 current = nextRankVal;
+                nextRankVal += rankStats[w] << (w+rescale);
+                rankVal0[w] = current;
+        }   }
+        {   U32 const minBits = tableLog+1 - maxW;
+            U32 consumed;
+            for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
+                U32* const rankValPtr = rankVal[consumed];
+                U32 w;
+                for (w = 1; w < maxW+1; w++) {
+                    rankValPtr[w] = rankVal0[w] >> consumed;
+    }   }   }   }
+
+    HUFv07_fillDTableX4(dt, maxTableLog,
+                   sortedSymbol, sizeOfSort,
+                   rankStart0, rankVal, maxW,
+                   tableLog+1);
+
+    dtd.tableLog = (BYTE)maxTableLog;
+    dtd.tableType = 1;
+    memcpy(DTable, &dtd, sizeof(dtd));
+    return iSize;
+}
+
+
+static U32 HUFv07_decodeSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BITv07_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 2);
+    BITv07_skipBits(DStream, dt[val].nbBits);
+    return dt[val].length;
+}
+
+static U32 HUFv07_decodeLastSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
+{
+    const size_t val = BITv07_lookBitsFast(DStream, dtLog);   /* note : dtLog >= 1 */
+    memcpy(op, dt+val, 1);
+    if (dt[val].length==1) BITv07_skipBits(DStream, dt[val].nbBits);
+    else {
+        if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
+            BITv07_skipBits(DStream, dt[val].nbBits);
+            if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
+                DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8);   /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
+    }   }
+    return 1;
+}
+
+
+#define HUFv07_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
+    ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv07_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
+    if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
+        ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+#define HUFv07_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
+    if (MEM_64bits()) \
+        ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
+
+static inline size_t HUFv07_decodeStreamX4(BYTE* p, BITv07_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv07_DEltX4* const dt, const U32 dtLog)
+{
+    BYTE* const pStart = p;
+
+    /* up to 8 symbols at a time */
+    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd-7)) {
+        HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUFv07_DECODE_SYMBOLX4_1(p, bitDPtr);
+        HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
+        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
+    }
+
+    /* closer to end : up to 2 symbols at a time */
+    while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-2))
+        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
+
+    while (p <= pEnd-2)
+        HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);   /* no need to reload : reached the end of DStream */
+
+    if (p < pEnd)
+        p += HUFv07_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
+
+    return p-pStart;
+}
+
+
+static size_t HUFv07_decompress1X4_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    BITv07_DStream_t bitD;
+
+    /* Init */
+    {   size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
+        if (HUFv07_isError(errorCode)) return errorCode;
+    }
+
+    /* decode */
+    {   BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;   /* force compiler to not use strict-aliasing */
+        const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
+        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+        HUFv07_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
+    }
+
+    /* check */
+    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
+
+    /* decoded size */
+    return dstSize;
+}
+
+size_t HUFv07_decompress1X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+    if (dtd.tableType != 1) return ERROR(GENERIC);
+    return HUFv07_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUFv07_decompress1X4_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t const hSize = HUFv07_readDTableX4 (DCtx, cSrc, cSrcSize);
+    if (HUFv07_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUFv07_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
+}
+
+size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
+    return HUFv07_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+static size_t HUFv07_decompress4X4_usingDTable_internal(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    if (cSrcSize < 10) return ERROR(corruption_detected);   /* strict minimum : jump table + 1 byte per stream */
+
+    {   const BYTE* const istart = (const BYTE*) cSrc;
+        BYTE* const ostart = (BYTE*) dst;
+        BYTE* const oend = ostart + dstSize;
+        const void* const dtPtr = DTable+1;
+        const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
+
+        /* Init */
+        BITv07_DStream_t bitD1;
+        BITv07_DStream_t bitD2;
+        BITv07_DStream_t bitD3;
+        BITv07_DStream_t bitD4;
+        size_t const length1 = MEM_readLE16(istart);
+        size_t const length2 = MEM_readLE16(istart+2);
+        size_t const length3 = MEM_readLE16(istart+4);
+        size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
+        const BYTE* const istart1 = istart + 6;  /* jumpTable */
+        const BYTE* const istart2 = istart1 + length1;
+        const BYTE* const istart3 = istart2 + length2;
+        const BYTE* const istart4 = istart3 + length3;
+        size_t const segmentSize = (dstSize+3) / 4;
+        BYTE* const opStart2 = ostart + segmentSize;
+        BYTE* const opStart3 = opStart2 + segmentSize;
+        BYTE* const opStart4 = opStart3 + segmentSize;
+        BYTE* op1 = ostart;
+        BYTE* op2 = opStart2;
+        BYTE* op3 = opStart3;
+        BYTE* op4 = opStart4;
+        U32 endSignal;
+        DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+        U32 const dtLog = dtd.tableLog;
+
+        if (length4 > cSrcSize) return ERROR(corruption_detected);   /* overflow */
+        { size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+        { size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
+          if (HUFv07_isError(errorCode)) return errorCode; }
+
+        /* 16-32 symbols per loop (4-8 symbols per stream) */
+        endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+        for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
+            HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUFv07_DECODE_SYMBOLX4_1(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX4_1(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX4_1(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX4_1(op4, &bitD4);
+            HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
+            HUFv07_DECODE_SYMBOLX4_0(op1, &bitD1);
+            HUFv07_DECODE_SYMBOLX4_0(op2, &bitD2);
+            HUFv07_DECODE_SYMBOLX4_0(op3, &bitD3);
+            HUFv07_DECODE_SYMBOLX4_0(op4, &bitD4);
+
+            endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
+        }
+
+        /* check corruption */
+        if (op1 > opStart2) return ERROR(corruption_detected);
+        if (op2 > opStart3) return ERROR(corruption_detected);
+        if (op3 > opStart4) return ERROR(corruption_detected);
+        /* note : op4 supposed already verified within main loop */
+
+        /* finish bitStreams one by one */
+        HUFv07_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
+        HUFv07_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
+        HUFv07_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
+        HUFv07_decodeStreamX4(op4, &bitD4, oend,     dt, dtLog);
+
+        /* check */
+        { U32 const endCheck = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
+          if (!endCheck) return ERROR(corruption_detected); }
+
+        /* decoded size */
+        return dstSize;
+    }
+}
+
+
+size_t HUFv07_decompress4X4_usingDTable(
+          void* dst,  size_t dstSize,
+    const void* cSrc, size_t cSrcSize,
+    const HUFv07_DTable* DTable)
+{
+    DTableDesc dtd = HUFv07_getDTableDesc(DTable);
+    if (dtd.tableType != 1) return ERROR(GENERIC);
+    return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
+}
+
+
+size_t HUFv07_decompress4X4_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    const BYTE* ip = (const BYTE*) cSrc;
+
+    size_t hSize = HUFv07_readDTableX4 (dctx, cSrc, cSrcSize);
+    if (HUFv07_isError(hSize)) return hSize;
+    if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
+    ip += hSize; cSrcSize -= hSize;
+
+    return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
+}
+
+size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
+    return HUFv07_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
+}
+
+
+/* ********************************/
+/* Generic decompression selector */
+/* ********************************/
+
+size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const HUFv07_DTable* DTable)
+{
+    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+    return dtd.tableType ? HUFv07_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
+                           HUFv07_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize,
+                                    const void* cSrc, size_t cSrcSize,
+                                    const HUFv07_DTable* DTable)
+{
+    DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
+    return dtd.tableType ? HUFv07_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
+                           HUFv07_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
+}
+
+
+typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
+static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
+{
+    /* single, double, quad */
+    {{0,0}, {1,1}, {2,2}},  /* Q==0 : impossible */
+    {{0,0}, {1,1}, {2,2}},  /* Q==1 : impossible */
+    {{  38,130}, {1313, 74}, {2151, 38}},   /* Q == 2 : 12-18% */
+    {{ 448,128}, {1353, 74}, {2238, 41}},   /* Q == 3 : 18-25% */
+    {{ 556,128}, {1353, 74}, {2238, 47}},   /* Q == 4 : 25-32% */
+    {{ 714,128}, {1418, 74}, {2436, 53}},   /* Q == 5 : 32-38% */
+    {{ 883,128}, {1437, 74}, {2464, 61}},   /* Q == 6 : 38-44% */
+    {{ 897,128}, {1515, 75}, {2622, 68}},   /* Q == 7 : 44-50% */
+    {{ 926,128}, {1613, 75}, {2730, 75}},   /* Q == 8 : 50-56% */
+    {{ 947,128}, {1729, 77}, {3359, 77}},   /* Q == 9 : 56-62% */
+    {{1107,128}, {2083, 81}, {4006, 84}},   /* Q ==10 : 62-69% */
+    {{1177,128}, {2379, 87}, {4785, 88}},   /* Q ==11 : 69-75% */
+    {{1242,128}, {2415, 93}, {5155, 84}},   /* Q ==12 : 75-81% */
+    {{1349,128}, {2644,106}, {5260,106}},   /* Q ==13 : 81-87% */
+    {{1455,128}, {2422,124}, {4174,124}},   /* Q ==14 : 87-93% */
+    {{ 722,128}, {1891,145}, {1936,146}},   /* Q ==15 : 93-99% */
+};
+
+/** HUFv07_selectDecoder() :
+*   Tells which decoder is likely to decode faster,
+*   based on a set of pre-determined metrics.
+*   @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4.
+*   Assumption : 0 < cSrcSize < dstSize <= 128 KB */
+U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize)
+{
+    /* decoder timing evaluation */
+    U32 const Q = (U32)(cSrcSize * 16 / dstSize);   /* Q < 16 since dstSize > cSrcSize */
+    U32 const D256 = (U32)(dstSize >> 8);
+    U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
+    U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
+    DTime1 += DTime1 >> 3;  /* advantage to algorithm using less memory, for cache eviction */
+
+    return DTime1 < DTime0;
+}
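+
+/* Illustrative worked example (added for clarity; values taken from the algoTime
+ * table above) : for dstSize = 100 KB and cSrcSize = 50 KB :
+ *   Q      = 51200*16/102400 = 8        (ratio bucket "50-56%")
+ *   D256   = 102400 >> 8     = 400
+ *   DTime0 = 926 + 128*400   = 52126    (algoTime[8][0], single-symbol)
+ *   DTime1 = 1613 + 75*400   = 31613    (algoTime[8][1], double-symbol)
+ *   DTime1 += 31613 >> 3    => 35564    (penalty for higher memory usage)
+ * Since 35564 < 52126, the function returns 1 : HUFv07_decompress4X4 is expected faster. */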
+
+
+typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
+
+size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    static const decompressionAlgo decompress[2] = { HUFv07_decompress4X2, HUFv07_decompress4X4 };
+
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+        return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
+    }
+
+    //return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize);   /* multi-streams single-symbol decoding */
+    //return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize);   /* multi-streams double-symbols decoding */
+}
+
+size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
+
+size_t HUFv07_decompress4X_hufOnly (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected);   /* invalid */
+
+    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
+
+size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
+{
+    /* validation checks */
+    if (dstSize == 0) return ERROR(dstSize_tooSmall);
+    if (cSrcSize > dstSize) return ERROR(corruption_detected);   /* invalid */
+    if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; }   /* not compressed */
+    if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; }   /* RLE */
+
+    {   U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
+        return algoNb ? HUFv07_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
+                        HUFv07_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
+    }
+}
+/*
+    Common functions of Zstd compression library
+    Copyright (C) 2015-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net/
+*/
+
+
+
+/*-****************************************
+*  ZSTD Error Management
+******************************************/
+/*! ZSTDv07_isError() :
+*   tells if a return value is an error code */
+unsigned ZSTDv07_isError(size_t code) { return ERR_isError(code); }
+
+/*! ZSTDv07_getErrorName() :
+*   provides error code string from function result (useful for debugging) */
+const char* ZSTDv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
+
+
+
+/* **************************************************************
+*  ZBUFF Error Management
+****************************************************************/
+unsigned ZBUFFv07_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZBUFFv07_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+
+
+void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
+{
+    void* address = malloc(size);
+    (void)opaque;
+    /* printf("alloc %p, %d opaque=%p \n", address, (int)size, opaque); */
+    return address;
+}
+
+void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
+{
+    (void)opaque;
+    /* if (address) printf("free %p opaque=%p \n", address, opaque); */
+    free(address);
+}
+/*
+    zstd_internal - common functions to include
+    Header File for include
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : https://www.zstd.net
+*/
+#ifndef ZSTDv07_CCOMMON_H_MODULE
+#define ZSTDv07_CCOMMON_H_MODULE
+
+
+/*-*************************************
+*  Common macros
+***************************************/
+#define MIN(a,b) ((a)<(b) ? (a) : (b))
+#define MAX(a,b) ((a)>(b) ? (a) : (b))
+
+
+/*-*************************************
+*  Common constants
+***************************************/
+#define ZSTDv07_OPT_NUM    (1<<12)
+#define ZSTDv07_DICT_MAGIC  0xEC30A437   /* v0.7 */
+
+#define ZSTDv07_REP_NUM    3
+#define ZSTDv07_REP_INIT   ZSTDv07_REP_NUM
+#define ZSTDv07_REP_MOVE   (ZSTDv07_REP_NUM-1)
+static const U32 repStartValue[ZSTDv07_REP_NUM] = { 1, 4, 8 };
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define BIT7 128
+#define BIT6  64
+#define BIT5  32
+#define BIT4  16
+#define BIT1   2
+#define BIT0   1
+
+#define ZSTDv07_WINDOWLOG_ABSOLUTEMIN 10
+static const size_t ZSTDv07_fcs_fieldSize[4] = { 0, 2, 4, 8 };
+static const size_t ZSTDv07_did_fieldSize[4] = { 0, 1, 2, 4 };
+
+#define ZSTDv07_BLOCKHEADERSIZE 3   /* the C standard doesn't allow a `static const` variable to be initialized from another `static const` variable */
+static const size_t ZSTDv07_blockHeaderSize = ZSTDv07_BLOCKHEADERSIZE;
+typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
+
+#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
+#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */
+
+#define HufLog 12
+typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t;
+
+#define LONGNBSEQ 0x7F00
+
+#define MINMATCH 3
+#define EQUAL_READ32 4
+
+#define Litbits  8
+#define MaxLit ((1<<Litbits) - 1)
+#define MaxML  52
+#define MaxLL  35
+#define MaxOff 28
+#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
+#define MLFSELog    9
+#define LLFSELog    9
+#define OffFSELog   8
+
+#define FSEv07_ENCODING_RAW     0
+#define FSEv07_ENCODING_RLE     1
+#define FSEv07_ENCODING_STATIC  2
+#define FSEv07_ENCODING_DYNAMIC 3
+
+static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
+                                     13,14,15,16 };
+static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
+                                             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
+                                            -1,-1,-1,-1 };
+static const U32 LL_defaultNormLog = 6;
+
+static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                                      1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
+                                     12,13,14,15,16 };
+static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                                             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
+                                            -1,-1,-1,-1,-1 };
+static const U32 ML_defaultNormLog = 6;
+
+static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+                                              1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
+static const U32 OF_defaultNormLog = 5;
+
+
+/*-*******************************************
+*  Shared functions to include for inlining
+*********************************************/
+static void ZSTDv07_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
+#define COPY8(d,s) { ZSTDv07_copy8(d,s); d+=8; s+=8; }
+
+/*! ZSTDv07_wildcopy() :
+*   custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
+#define WILDCOPY_OVERLENGTH 8
+MEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, ptrdiff_t length)
+{
+    const BYTE* ip = (const BYTE*)src;
+    BYTE* op = (BYTE*)dst;
+    BYTE* const oend = op + length;
+    do
+        COPY8(op, ip)
+    while (op < oend);
+}
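+
+/* Illustrative note (added) : ZSTDv07_wildcopy rounds the copy length up to a
+ * multiple of 8. For example, length==13 copies 16 bytes, and length==0 still
+ * copies 8 bytes (the do/while body runs at least once). This is why callers
+ * must reserve WILDCOPY_OVERLENGTH bytes of slack past the true end of dst. */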
+
+
+/*-*******************************************
+*  Private interfaces
+*********************************************/
+typedef struct ZSTDv07_stats_s ZSTDv07_stats_t;
+
+typedef struct {
+    U32 off;
+    U32 len;
+} ZSTDv07_match_t;
+
+typedef struct {
+    U32 price;
+    U32 off;
+    U32 mlen;
+    U32 litlen;
+    U32 rep[ZSTDv07_REP_INIT];
+} ZSTDv07_optimal_t;
+
+struct ZSTDv07_stats_s { U32 unused; };
+
+typedef struct {
+    void* buffer;
+    U32*  offsetStart;
+    U32*  offset;
+    BYTE* offCodeStart;
+    BYTE* litStart;
+    BYTE* lit;
+    U16*  litLengthStart;
+    U16*  litLength;
+    BYTE* llCodeStart;
+    U16*  matchLengthStart;
+    U16*  matchLength;
+    BYTE* mlCodeStart;
+    U32   longLengthID;   /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
+    U32   longLengthPos;
+    /* opt */
+    ZSTDv07_optimal_t* priceTable;
+    ZSTDv07_match_t* matchTable;
+    U32* matchLengthFreq;
+    U32* litLengthFreq;
+    U32* litFreq;
+    U32* offCodeFreq;
+    U32  matchLengthSum;
+    U32  matchSum;
+    U32  litLengthSum;
+    U32  litSum;
+    U32  offCodeSum;
+    U32  log2matchLengthSum;
+    U32  log2matchSum;
+    U32  log2litLengthSum;
+    U32  log2litSum;
+    U32  log2offCodeSum;
+    U32  factor;
+    U32  cachedPrice;
+    U32  cachedLitLength;
+    const BYTE* cachedLiterals;
+    ZSTDv07_stats_t stats;
+} seqStore_t;
+
+void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
+
+/* custom memory allocation functions */
+static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };
+
+#endif   /* ZSTDv07_CCOMMON_H_MODULE */
+/*
+    zstd - standard compression library
+    Copyright (C) 2014-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net
+*/
+
+/* ***************************************************************
+*  Tuning parameters
+*****************************************************************/
+/*!
+ * HEAPMODE :
+ * Select how default decompression function ZSTDv07_decompress() will allocate memory,
+ * in memory stack (0), or in memory heap (1, requires malloc())
+ */
+#ifndef ZSTDv07_HEAPMODE
+#  define ZSTDv07_HEAPMODE 1
+#endif
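+
+/* Usage note (added) : the default above can be overridden at build time,
+ * e.g. `cc -DZSTDv07_HEAPMODE=0 ...` to keep the decompression state on the
+ * stack instead of allocating it with malloc(). */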
+
+
+/*-*******************************************************
+*  Compiler specifics
+*********************************************************/
+#ifdef _MSC_VER    /* Visual Studio */
+#  include <intrin.h>                    /* For Visual 2005 */
+#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
+#  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
+#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
+#endif
+
+
+/*-*************************************
+*  Macros
+***************************************/
+#define ZSTDv07_isError ERR_isError   /* for inlining */
+#define FSEv07_isError  ERR_isError
+#define HUFv07_isError  ERR_isError
+
+
+/*_*******************************************************
+*  Memory operations
+**********************************************************/
+static void ZSTDv07_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
+
+
+/*-*************************************************************
+*   Context management
+***************************************************************/
+typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
+               ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
+               ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTDv07_dStage;
+
+struct ZSTDv07_DCtx_s
+{
+    FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)];
+    FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)];
+    FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)];
+    HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)];  /* can accommodate HUFv07_decompress4X */
+    const void* previousDstEnd;
+    const void* base;
+    const void* vBase;
+    const void* dictEnd;
+    size_t expected;
+    U32 rep[3];
+    ZSTDv07_frameParams fParams;
+    blockType_t bType;   /* used in ZSTDv07_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
+    ZSTDv07_dStage stage;
+    U32 litEntropy;
+    U32 fseEntropy;
+    XXH64_state_t xxhState;
+    size_t headerSize;
+    U32 dictID;
+    const BYTE* litPtr;
+    ZSTDv07_customMem customMem;
+    size_t litSize;
+    BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
+    BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
+};  /* typedef'd to ZSTDv07_DCtx within "zstd_static.h" */
+
+int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx);
+
+size_t ZSTDv07_sizeofDCtx (const ZSTDv07_DCtx* dctx) { return sizeof(*dctx); }
+
+size_t ZSTDv07_estimateDCtxSize(void) { return sizeof(ZSTDv07_DCtx); }
+
+size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx)
+{
+    dctx->expected = ZSTDv07_frameHeaderSize_min;
+    dctx->stage = ZSTDds_getFrameHeaderSize;
+    dctx->previousDstEnd = NULL;
+    dctx->base = NULL;
+    dctx->vBase = NULL;
+    dctx->dictEnd = NULL;
+    dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001);
+    dctx->litEntropy = dctx->fseEntropy = 0;
+    dctx->dictID = 0;
+    { int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; }
+    return 0;
+}
+
+ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem)
+{
+    ZSTDv07_DCtx* dctx;
+
+    if (!customMem.customAlloc && !customMem.customFree)
+        customMem = defaultCustomMem;
+
+    if (!customMem.customAlloc || !customMem.customFree)
+        return NULL;
+
+    dctx = (ZSTDv07_DCtx*) customMem.customAlloc(customMem.opaque, sizeof(ZSTDv07_DCtx));
+    if (!dctx) return NULL;
+    memcpy(&dctx->customMem, &customMem, sizeof(ZSTDv07_customMem));
+    ZSTDv07_decompressBegin(dctx);
+    return dctx;
+}
+
+ZSTDv07_DCtx* ZSTDv07_createDCtx(void)
+{
+    return ZSTDv07_createDCtx_advanced(defaultCustomMem);
+}
+
+size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx)
+{
+    if (dctx==NULL) return 0;   /* support free on NULL */
+    dctx->customMem.customFree(dctx->customMem.opaque, dctx);
+    return 0;   /* reserved as a potential error code in the future */
+}
+
+void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dstDCtx, const ZSTDv07_DCtx* srcDCtx)
+{
+    memcpy(dstDCtx, srcDCtx,
+           sizeof(ZSTDv07_DCtx) - (ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH + ZSTDv07_frameHeaderSize_max));  /* no need to copy workspace */
+}
+
+
+/*-*************************************************************
+*   Decompression section
+***************************************************************/
+
+/* Frame format description
+   Frame Header -  [ Block Header - Block ] - Frame End
+   1) Frame Header
+      - 4 bytes - Magic Number : ZSTDv07_MAGICNUMBER (defined within zstd.h)
+      - 1 byte  - Frame Descriptor
+   2) Block Header
+      - 3 bytes, starting with a 2-bits descriptor
+                 Uncompressed, Compressed, Frame End, unused
+   3) Block
+      See Block Format Description
+   4) Frame End
+      - 3 bytes, compatible with Block Header
+*/
+
+
+/* Frame Header :
+
+   1 byte - FrameHeaderDescription :
+   bit 0-1 : dictID (0, 1, 2 or 4 bytes)
+   bit 2   : checksumFlag
+   bit 3   : reserved (must be zero)
+   bit 4   : reserved (unused, can be any value)
+   bit 5   : Single Segment (if 1, WindowLog byte is not present)
+   bit 6-7 : FrameContentFieldSize (0, 2, 4, or 8)
+             if (SkippedWindowLog && !FrameContentFieldSize) FrameContentFieldSize=1;
+
+   Optional : WindowLog (0 or 1 byte)
+   bit 0-2 : octal Fractional (1/8th)
+   bit 3-7 : Power of 2, with 0 = 1 KB (up to 2 TB)
+
+   Optional : dictID (0, 1, 2 or 4 bytes)
+   Automatic adaptation
+   0 : no dictID
+   1 : 1 - 255
+   2 : 256 - 65535
+   4 : all other values
+
+   Optional : content size (0, 1, 2, 4 or 8 bytes)
+   0 : unknown          (fcfs==0 and swl==0)
+   1 : 0-255 bytes      (fcfs==0 and swl==1)
+   2 : 256 - 65535+256  (fcfs==1)
+   4 : 0 - 4GB-1        (fcfs==2)
+   8 : 0 - 16EB-1       (fcfs==3)
+*/
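+
+/* Worked example (added for clarity) : a FrameHeaderDescription byte of 0xC2
+   (binary 1100 0010) decodes as :
+     bit 0-1 = 10  => 2-byte dictID field
+     bit 2   = 0   => no content checksum
+     bit 5   = 0   => not Single Segment, so a WindowLog byte follows
+     bit 6-7 = 11  => 8-byte frame content size field
+   A following WindowLog byte of 0x20 gives windowLog = (0x20>>3) + 10 = 14,
+   i.e. a 16 KB window (the fractional bits 0-2 are zero). */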
+
+
+/* Compressed Block, format description
+
+   Block = Literal Section - Sequences Section
+   Prerequisite : size of (compressed) block, maximum size of regenerated data
+
+   1) Literal Section
+
+   1.1) Header : 1-5 bytes
+        flags: 2 bits
+            00 compressed by Huff0
+            01 unused
+            10 is Raw (uncompressed)
+            11 is Rle
+            Note : using 01 => Huff0 with precomputed table ?
+            Note : delta map ? => compressed ?
+
+   1.1.1) Huff0-compressed literal block : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+            srcSize < 16KB => 4 bytes (2-2-14-14)
+            else           => 5 bytes (2-2-18-18)
+            big endian convention
+
+   1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
+        size :  5 bits: (IS_RAW<<6) + (0<<4) + size
+               12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
+                        size&255
+               20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
+                        size>>8&255
+                        size&255
+
+   1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
+        size :  5 bits: (IS_RLE<<6) + (0<<4) + size
+               12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
+                        size&255
+               20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
+                        size>>8&255
+                        size&255
+
+   1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
+            srcSize < 1 KB => 3 bytes (2-2-10-10) => 4 streams
+            srcSize < 16KB => 4 bytes (2-2-14-14)
+            else           => 5 bytes (2-2-18-18)
+            big endian convention
+
+        1- CTable available (stored into workspace ?)
+        2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
+
+
+   1.2) Literal block content
+
+   1.2.1) Huff0 block, using sizes from header
+        See Huff0 format
+
+   1.2.2) Huff0 block, using prepared table
+
+   1.2.3) Raw content
+
+   1.2.4) single byte
+
+
+   2) Sequences section
+      TO DO
+*/
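+
+/* Worked example (added for clarity) : a Huff0 literal-section header of
+   bytes { 0x01, 0x90, 0x40 } decodes as :
+     istart[0]>>6     = 0    => Huff0-compressed literal block
+     (istart[0]>>4)&3 = 0    => 3-byte header, 4 streams (bit 4 == 0)
+     litSize  = ((0x01&15)<<6) + (0x90>>2) = 64 + 36 = 100
+     litCSize = ((0x90&3)<<8) + 0x40       = 64
+   i.e. 100 literals regenerated from 64 compressed bytes. */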
+
+/** ZSTDv07_frameHeaderSize() :
+*   srcSize must be >= ZSTDv07_frameHeaderSize_min.
+*   @return : size of the Frame Header */
+static size_t ZSTDv07_frameHeaderSize(const void* src, size_t srcSize)
+{
+    if (srcSize < ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);
+    {   BYTE const fhd = ((const BYTE*)src)[4];
+        U32 const dictID= fhd & 3;
+        U32 const directMode = (fhd >> 5) & 1;
+        U32 const fcsId = fhd >> 6;
+        return ZSTDv07_frameHeaderSize_min + !directMode + ZSTDv07_did_fieldSize[dictID] + ZSTDv07_fcs_fieldSize[fcsId]
+                + (directMode && !ZSTDv07_fcs_fieldSize[fcsId]);
+    }
+}
+
+
+/** ZSTDv07_getFrameParams() :
+*   decode Frame Header, or require larger `srcSize`.
+*   @return : 0, `fparamsPtr` is correctly filled,
+*            >0, `srcSize` is too small, result is expected `srcSize`,
+*             or an error code, which can be tested using ZSTDv07_isError() */
+size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+
+    if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min;
+    if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
+        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
+            if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */
+            memset(fparamsPtr, 0, sizeof(*fparamsPtr));
+            fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
+            fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
+            return 0;
+        }
+        return ERROR(prefix_unknown);
+    }
+
+    /* ensure there is enough `srcSize` to fully read/decode frame header */
+    { size_t const fhsize = ZSTDv07_frameHeaderSize(src, srcSize);
+      if (srcSize < fhsize) return fhsize; }
+
+    {   BYTE const fhdByte = ip[4];
+        size_t pos = 5;
+        U32 const dictIDSizeCode = fhdByte&3;
+        U32 const checksumFlag = (fhdByte>>2)&1;
+        U32 const directMode = (fhdByte>>5)&1;
+        U32 const fcsID = fhdByte>>6;
+        U32 const windowSizeMax = 1U << ZSTDv07_WINDOWLOG_MAX;
+        U32 windowSize = 0;
+        U32 dictID = 0;
+        U64 frameContentSize = 0;
+        if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported);   /* reserved bits, which must be zero */
+        if (!directMode) {
+            BYTE const wlByte = ip[pos++];
+            U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN;
+            if (windowLog > ZSTDv07_WINDOWLOG_MAX) return ERROR(frameParameter_unsupported);
+            windowSize = (1U << windowLog);
+            windowSize += (windowSize >> 3) * (wlByte&7);
+        }
+
+        switch(dictIDSizeCode)
+        {
+            default:   /* impossible */
+            case 0 : break;
+            case 1 : dictID = ip[pos]; pos++; break;
+            case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
+            case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
+        }
+        switch(fcsID)
+        {
+            default:   /* impossible */
+            case 0 : if (directMode) frameContentSize = ip[pos]; break;
+            case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
+            case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
+            case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
+        }
+        if (!windowSize) windowSize = (U32)frameContentSize;
+        if (windowSize > windowSizeMax) return ERROR(frameParameter_unsupported);
+        fparamsPtr->frameContentSize = frameContentSize;
+        fparamsPtr->windowSize = windowSize;
+        fparamsPtr->dictID = dictID;
+        fparamsPtr->checksumFlag = checksumFlag;
+    }
+    return 0;
+}
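+
+/* Illustrative caller sketch (added; `buf` and `filled` are hypothetical names
+ * for the caller's input buffer and the number of bytes available in it) :
+ *
+ *   ZSTDv07_frameParams fp;
+ *   size_t const r = ZSTDv07_getFrameParams(&fp, buf, filled);
+ *   if (ZSTDv07_isError(r))  { ...invalid frame... }
+ *   else if (r > 0)          { ...read more input : header needs `r` bytes total... }
+ *   else                     { ...fp is fully filled and usable... }
+ */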
+
+
+/** ZSTDv07_getDecompressedSize() :
+*   compatible with legacy mode
+*   @return : decompressed size if known, 0 otherwise
+              note : 0 can mean any of the following :
+                   - decompressed size is not provided within frame header
+                   - frame header unknown / not supported
+                   - frame header not completely provided (`srcSize` too small) */
+unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize)
+{
+    {   ZSTDv07_frameParams fparams;
+        size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);
+        if (frResult!=0) return 0;
+        return fparams.frameContentSize;
+    }
+}
+
+
+/** ZSTDv07_decodeFrameHeader() :
+*   `srcSize` must be the size provided by ZSTDv07_frameHeaderSize().
+*   @return : 0 if success, or an error code, which can be tested using ZSTDv07_isError() */
+static size_t ZSTDv07_decodeFrameHeader(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize)
+{
+    size_t const result = ZSTDv07_getFrameParams(&(dctx->fParams), src, srcSize);
+    if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
+    if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
+    return result;
+}
+
+
+typedef struct
+{
+    blockType_t blockType;
+    U32 origSize;
+} blockProperties_t;
+
+/*! ZSTDv07_getcBlockSize() :
+*   Provides the size of compressed block from block header `src` */
+size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
+{
+    const BYTE* const in = (const BYTE* const)src;
+    U32 cSize;
+
+    if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+
+    bpPtr->blockType = (blockType_t)((*in) >> 6);
+    cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
+    bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
+
+    if (bpPtr->blockType == bt_end) return 0;
+    if (bpPtr->blockType == bt_rle) return 1;
+    return cSize;
+}
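+
+/* Worked example (added for clarity) : a block header of bytes
+   { 0x40, 0x01, 0x00 } decodes as :
+     in[0]>>6 = 1                                  => bt_raw (uncompressed)
+     cSize    = 0x00 + (0x01<<8) + ((0x40&7)<<16)  = 256
+   so the next 256 bytes form an uncompressed block, copied as-is. */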
+
+
+static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
+    memcpy(dst, src, srcSize);
+    return srcSize;
+}
+
+
+/*! ZSTDv07_decodeLiteralsBlock() :
+    @return : nb of bytes read from src (< srcSize ) */
+size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
+                          const void* src, size_t srcSize)   /* note : srcSize < BLOCKSIZE */
+{
+    const BYTE* const istart = (const BYTE*) src;
+
+    if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
+
+    switch((litBlockType_t)(istart[0]>> 6))
+    {
+    case lbt_huffman:
+        {   size_t litSize, litCSize, singleStream=0;
+            U32 lhSize = (istart[0] >> 4) & 3;
+            if (srcSize < 5) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
+                /* 2 - 2 - 10 - 10 */
+                lhSize=3;
+                singleStream = istart[0] & 16;
+                litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+                litCSize = ((istart[1] &  3) << 8) + istart[2];
+                break;
+            case 2:
+                /* 2 - 2 - 14 - 14 */
+                lhSize=4;
+                litSize  = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
+                litCSize = ((istart[2] & 63) <<  8) + istart[3];
+                break;
+            case 3:
+                /* 2 - 2 - 18 - 18 */
+                lhSize=5;
+                litSize  = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
+                litCSize = ((istart[2] &  3) << 16) + (istart[3] << 8) + istart[4];
+                break;
+            }
+            if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
+            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+            if (HUFv07_isError(singleStream ?
+                            HUFv07_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
+                            HUFv07_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
+                return ERROR(corruption_detected);
+
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            dctx->litEntropy = 1;
+            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+            return litCSize + lhSize;
+        }
+    case lbt_repeat:
+        {   size_t litSize, litCSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            if (lhSize != 1)  /* only case supported for now : small litSize, single stream */
+                return ERROR(corruption_detected);
+            if (dctx->litEntropy==0)
+                return ERROR(dictionary_corrupted);
+
+            /* 2 - 2 - 10 - 10 */
+            lhSize=3;
+            litSize  = ((istart[0] & 15) << 6) + (istart[1] >> 2);
+            litCSize = ((istart[1] &  3) << 8) + istart[2];
+            if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
+
+            {   size_t const errorCode = HUFv07_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTable);
+                if (HUFv07_isError(errorCode)) return ERROR(corruption_detected);
+            }
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+            return litCSize + lhSize;
+        }
+    case lbt_raw:
+        {   size_t litSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
+                lhSize=1;
+                litSize = istart[0] & 31;
+                break;
+            case 2:
+                litSize = ((istart[0] & 15) << 8) + istart[1];
+                break;
+            case 3:
+                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+                break;
+            }
+
+            if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) {  /* risk reading beyond src buffer with wildcopy */
+                if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
+                memcpy(dctx->litBuffer, istart+lhSize, litSize);
+                dctx->litPtr = dctx->litBuffer;
+                dctx->litSize = litSize;
+                memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
+                return lhSize+litSize;
+            }
+            /* direct reference into compressed stream */
+            dctx->litPtr = istart+lhSize;
+            dctx->litSize = litSize;
+            return lhSize+litSize;
+        }
+    case lbt_rle:
+        {   size_t litSize;
+            U32 lhSize = ((istart[0]) >> 4) & 3;
+            switch(lhSize)
+            {
+            case 0: case 1: default:   /* note : default is impossible, since lhSize into [0..3] */
+                lhSize = 1;
+                litSize = istart[0] & 31;
+                break;
+            case 2:
+                litSize = ((istart[0] & 15) << 8) + istart[1];
+                break;
+            case 3:
+                litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
+                if (srcSize<4) return ERROR(corruption_detected);   /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
+                break;
+            }
+            if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
+            memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
+            dctx->litPtr = dctx->litBuffer;
+            dctx->litSize = litSize;
+            return lhSize+1;
+        }
+    default:
+        return ERROR(corruption_detected);   /* impossible */
+    }
+}
+
+
+/*! ZSTDv07_buildSeqTable() :
+    @return : nb bytes read from src,
+              or an error code if it fails, testable with ZSTDv07_isError()
+*/
+size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,
+                                 const void* src, size_t srcSize,
+                                 const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
+{
+    switch(type)
+    {
+    case FSEv07_ENCODING_RLE :
+        if (!srcSize) return ERROR(srcSize_wrong);
+        if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
+        FSEv07_buildDTable_rle(DTable, *(const BYTE*)src);   /* if *src > max, data is corrupted */
+        return 1;
+    case FSEv07_ENCODING_RAW :
+        FSEv07_buildDTable(DTable, defaultNorm, max, defaultLog);
+        return 0;
+    case FSEv07_ENCODING_STATIC:
+        if (!flagRepeatTable) return ERROR(corruption_detected);
+        return 0;
+    default :   /* impossible */
+    case FSEv07_ENCODING_DYNAMIC :
+        {   U32 tableLog;
+            S16 norm[MaxSeq+1];
+            size_t const headerSize = FSEv07_readNCount(norm, &max, &tableLog, src, srcSize);
+            if (FSEv07_isError(headerSize)) return ERROR(corruption_detected);
+            if (tableLog > maxLog) return ERROR(corruption_detected);
+            FSEv07_buildDTable(DTable, norm, max, tableLog);
+            return headerSize;
+    }   }
+}
+
+
+size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
+                             FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable,
+                             const void* src, size_t srcSize)
+{
+    const BYTE* const istart = (const BYTE* const)src;
+    const BYTE* const iend = istart + srcSize;
+    const BYTE* ip = istart;
+
+    /* check */
+    if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
+
+    /* SeqHead */
+    {   int nbSeq = *ip++;
+        if (!nbSeq) { *nbSeqPtr=0; return 1; }
+        if (nbSeq > 0x7F) {
+            if (nbSeq == 0xFF) {
+                if (ip+2 > iend) return ERROR(srcSize_wrong);
+                nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
+            } else {
+                if (ip >= iend) return ERROR(srcSize_wrong);
+                nbSeq = ((nbSeq-0x80)<<8) + *ip++;
+            }
+        }
+        *nbSeqPtr = nbSeq;
+    }
+
+    /* FSE table descriptors */
+    {   U32 const LLtype  = *ip >> 6;
+        U32 const OFtype = (*ip >> 4) & 3;
+        U32 const MLtype  = (*ip >> 2) & 3;
+        ip++;
+
+        /* check */
+        if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
+
+        /* Build DTables */
+        {   size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
+            if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);
+            ip += llhSize;
+        }
+        {   size_t const ofhSize = ZSTDv07_buildSeqTable(DTableOffb, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
+            if (ZSTDv07_isError(ofhSize)) return ERROR(corruption_detected);
+            ip += ofhSize;
+        }
+        {   size_t const mlhSize = ZSTDv07_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
+            if (ZSTDv07_isError(mlhSize)) return ERROR(corruption_detected);
+            ip += mlhSize;
+    }   }
+
+    return ip-istart;
+}
+
+
+typedef struct {
+    size_t litLength;
+    size_t matchLength;
+    size_t offset;
+} seq_t;
+
+typedef struct {
+    BITv07_DStream_t DStream;
+    FSEv07_DState_t stateLL;
+    FSEv07_DState_t stateOffb;
+    FSEv07_DState_t stateML;
+    size_t prevOffset[ZSTDv07_REP_INIT];
+} seqState_t;
+
+
+static seq_t ZSTDv07_decodeSequence(seqState_t* seqState)
+{
+    seq_t seq;
+
+    U32 const llCode = FSEv07_peekSymbol(&(seqState->stateLL));
+    U32 const mlCode = FSEv07_peekSymbol(&(seqState->stateML));
+    U32 const ofCode = FSEv07_peekSymbol(&(seqState->stateOffb));   /* <= maxOff, by table construction */
+
+    U32 const llBits = LL_bits[llCode];
+    U32 const mlBits = ML_bits[mlCode];
+    U32 const ofBits = ofCode;
+    U32 const totalBits = llBits+mlBits+ofBits;
+
+    static const U32 LL_base[MaxLL+1] = {
+                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
+                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
+                            0x2000, 0x4000, 0x8000, 0x10000 };
+
+    static const U32 ML_base[MaxML+1] = {
+                             3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,   14,    15,    16,    17,    18,
+                            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,   30,    31,    32,    33,    34,
+                            35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
+                            0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
+
+    static const U32 OF_base[MaxOff+1] = {
+                 0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
+                 0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
+                 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
+                 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
+
+    /* sequence */
+    {   size_t offset;
+        if (!ofCode)
+            offset = 0;
+        else {
+            offset = OF_base[ofCode] + BITv07_readBits(&(seqState->DStream), ofBits);   /* <=  (ZSTDv07_WINDOWLOG_MAX-1) bits */
+            if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));
+        }
+
+        if (ofCode <= 1) {
+            if ((llCode == 0) & (offset <= 1)) offset = 1-offset;
+            if (offset) {
+                size_t const temp = seqState->prevOffset[offset];
+                if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
+                seqState->prevOffset[1] = seqState->prevOffset[0];
+                seqState->prevOffset[0] = offset = temp;
+            } else {
+                offset = seqState->prevOffset[0];
+            }
+        } else {
+            seqState->prevOffset[2] = seqState->prevOffset[1];
+            seqState->prevOffset[1] = seqState->prevOffset[0];
+            seqState->prevOffset[0] = offset;
+        }
+        seq.offset = offset;
+    }
+
+    seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BITv07_readBits(&(seqState->DStream), mlBits) : 0);   /* <=  16 bits */
+    if (MEM_32bits() && (mlBits+llBits>24)) BITv07_reloadDStream(&(seqState->DStream));
+
+    seq.litLength = LL_base[llCode] + ((llCode>15) ? BITv07_readBits(&(seqState->DStream), llBits) : 0);   /* <=  16 bits */
+    if (MEM_32bits() ||
+       (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv07_reloadDStream(&(seqState->DStream));
+
+    /* ANS state update */
+    FSEv07_updateState(&(seqState->stateLL), &(seqState->DStream));   /* <=  9 bits */
+    FSEv07_updateState(&(seqState->stateML), &(seqState->DStream));   /* <=  9 bits */
+    if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));     /* <= 18 bits */
+    FSEv07_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <=  8 bits */
+
+    return seq;
+}
+
+
+static
+size_t ZSTDv07_execSequence(BYTE* op,
+                                BYTE* const oend, seq_t sequence,
+                                const BYTE** litPtr, const BYTE* const litLimit,
+                                const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
+{
+    BYTE* const oLitEnd = op + sequence.litLength;
+    size_t const sequenceLength = sequence.litLength + sequence.matchLength;
+    BYTE* const oMatchEnd = op + sequenceLength;   /* risk : address space overflow (32-bits) */
+    BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH;
+    const BYTE* const iLitEnd = *litPtr + sequence.litLength;
+    const BYTE* match = oLitEnd - sequence.offset;
+
+    /* check */
+    if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
+    if (iLitEnd > litLimit) return ERROR(corruption_detected);   /* over-read beyond lit buffer */
+
+    /* copy Literals */
+    ZSTDv07_wildcopy(op, *litPtr, sequence.litLength);   /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
+    op = oLitEnd;
+    *litPtr = iLitEnd;   /* update for next sequence */
+
+    /* copy Match */
+    if (sequence.offset > (size_t)(oLitEnd - base)) {
+        /* offset beyond prefix */
+        if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
+        match = dictEnd - (base-match);
+        if (match + sequence.matchLength <= dictEnd) {
+            memmove(oLitEnd, match, sequence.matchLength);
+            return sequenceLength;
+        }
+        /* span extDict & currentPrefixSegment */
+        {   size_t const length1 = dictEnd - match;
+            memmove(oLitEnd, match, length1);
+            op = oLitEnd + length1;
+            sequence.matchLength -= length1;
+            match = base;
+            if (op > oend_w || sequence.matchLength < MINMATCH) {
+              while (op < oMatchEnd) *op++ = *match++;
+              return sequenceLength;
+            }
+    }   }
+    /* Requirement: op <= oend_w */
+
+    /* match within prefix */
+    if (sequence.offset < 8) {
+        /* close range match, overlap */
+        static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };   /* added */
+        static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 };   /* subtracted */
+        int const sub2 = dec64table[sequence.offset];
+        op[0] = match[0];
+        op[1] = match[1];
+        op[2] = match[2];
+        op[3] = match[3];
+        match += dec32table[sequence.offset];
+        ZSTDv07_copy4(op+4, match);
+        match -= sub2;
+    } else {
+        ZSTDv07_copy8(op, match);
+    }
+    op += 8; match += 8;
+
+    if (oMatchEnd > oend-(16-MINMATCH)) {
+        if (op < oend_w) {
+            ZSTDv07_wildcopy(op, match, oend_w - op);
+            match += oend_w - op;
+            op = oend_w;
+        }
+        while (op < oMatchEnd) *op++ = *match++;
+    } else {
+        ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
+    }
+    return sequenceLength;
+}
+
+
+static size_t ZSTDv07_decompressSequences(
+                               ZSTDv07_DCtx* dctx,
+                               void* dst, size_t maxDstSize,
+                         const void* seqStart, size_t seqSize)
+{
+    const BYTE* ip = (const BYTE*)seqStart;
+    const BYTE* const iend = ip + seqSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + maxDstSize;
+    BYTE* op = ostart;
+    const BYTE* litPtr = dctx->litPtr;
+    const BYTE* const litEnd = litPtr + dctx->litSize;
+    FSEv07_DTable* DTableLL = dctx->LLTable;
+    FSEv07_DTable* DTableML = dctx->MLTable;
+    FSEv07_DTable* DTableOffb = dctx->OffTable;
+    const BYTE* const base = (const BYTE*) (dctx->base);
+    const BYTE* const vBase = (const BYTE*) (dctx->vBase);
+    const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
+    int nbSeq;
+
+    /* Build Decoding Tables */
+    {   size_t const seqHSize = ZSTDv07_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->fseEntropy, ip, seqSize);
+        if (ZSTDv07_isError(seqHSize)) return seqHSize;
+        ip += seqHSize;
+    }
+
+    /* Regen sequences */
+    if (nbSeq) {
+        seqState_t seqState;
+        dctx->fseEntropy = 1;
+        { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) seqState.prevOffset[i] = dctx->rep[i]; }
+        { size_t const errorCode = BITv07_initDStream(&(seqState.DStream), ip, iend-ip);
+          if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
+        FSEv07_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
+        FSEv07_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
+        FSEv07_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
+
+        for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) {
+            nbSeq--;
+            {   seq_t const sequence = ZSTDv07_decodeSequence(&seqState);
+                size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
+                if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize;
+                op += oneSeqSize;
+        }   }
+
+        /* check if reached exact end */
+        if (nbSeq) return ERROR(corruption_detected);
+        /* save reps for next block */
+        { U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
+    }
+
+    /* last literal segment */
+    {   size_t const lastLLSize = litEnd - litPtr;
+        //if (litPtr > litEnd) return ERROR(corruption_detected);   /* too many literals already used */
+        if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
+        memcpy(op, litPtr, lastLLSize);
+        op += lastLLSize;
+    }
+
+    return op-ostart;
+}
+
+
+static void ZSTDv07_checkContinuity(ZSTDv07_DCtx* dctx, const void* dst)
+{
+    if (dst != dctx->previousDstEnd) {   /* not contiguous */
+        dctx->dictEnd = dctx->previousDstEnd;
+        dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+        dctx->base = dst;
+        dctx->previousDstEnd = dst;
+    }
+}
+
+
+static size_t ZSTDv07_decompressBlock_internal(ZSTDv07_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{   /* blockType == blockCompressed */
+    const BYTE* ip = (const BYTE*)src;
+
+    if (srcSize >= ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);
+
+    /* Decode literals sub-block */
+    {   size_t const litCSize = ZSTDv07_decodeLiteralsBlock(dctx, src, srcSize);
+        if (ZSTDv07_isError(litCSize)) return litCSize;
+        ip += litCSize;
+        srcSize -= litCSize;
+    }
+    return ZSTDv07_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
+}
+
+
+size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx,
+                            void* dst, size_t dstCapacity,
+                      const void* src, size_t srcSize)
+{
+    size_t dSize;
+    ZSTDv07_checkContinuity(dctx, dst);
+    dSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+    dctx->previousDstEnd = (char*)dst + dSize;
+    return dSize;
+}
+
+
+/** ZSTDv07_insertBlock() :
+    insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
+ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize)
+{
+    ZSTDv07_checkContinuity(dctx, blockStart);
+    dctx->previousDstEnd = (const char*)blockStart + blockSize;
+    return blockSize;
+}
+
+
+size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
+{
+    if (length > dstCapacity) return ERROR(dstSize_tooSmall);
+    memset(dst, byte, length);
+    return length;
+}
+
+
+/*! ZSTDv07_decompressFrame() :
+*   `dctx` must be properly initialized */
+static size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    const BYTE* const iend = ip + srcSize;
+    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const oend = ostart + dstCapacity;
+    BYTE* op = ostart;
+    size_t remainingSize = srcSize;
+
+    /* check */
+    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+
+    /* Frame Header */
+    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
+        if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
+        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+        if (ZSTDv07_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1) {
+        size_t decodedSize;
+        blockProperties_t blockProperties;
+        size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, iend-ip, &blockProperties);
+        if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTDv07_blockHeaderSize;
+        remainingSize -= ZSTDv07_blockHeaderSize;
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        switch(blockProperties.blockType)
+        {
+        case bt_compressed:
+            decodedSize = ZSTDv07_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
+            break;
+        case bt_raw :
+            decodedSize = ZSTDv07_copyRawBlock(op, oend-op, ip, cBlockSize);
+            break;
+        case bt_rle :
+            decodedSize = ZSTDv07_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
+            break;
+        case bt_end :
+            /* end of frame */
+            if (remainingSize) return ERROR(srcSize_wrong);
+            decodedSize = 0;
+            break;
+        default:
+            return ERROR(GENERIC);   /* impossible */
+        }
+        if (blockProperties.blockType == bt_end) break;   /* bt_end */
+
+        if (ZSTDv07_isError(decodedSize)) return decodedSize;
+        if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);
+        op += decodedSize;
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return op-ostart;
+}
+
+
+/*! ZSTDv07_decompress_usingPreparedDCtx() :
+*   Same as ZSTDv07_decompress_usingDict(), but uses a reference context `preparedDCtx` where the dictionary has already been loaded.
+*   It avoids reloading the dictionary each time.
+*   `preparedDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict().
+*   Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
+size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,
+                                         void* dst, size_t dstCapacity,
+                                   const void* src, size_t srcSize)
+{
+    ZSTDv07_copyDCtx(dctx, refDCtx);
+    ZSTDv07_checkContinuity(dctx, dst);
+    return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
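+
+/* Illustrative sketch, not part of the original source : the two-context
+ * pattern described above. `refDCtx` is prepared once with the dictionary,
+ * then re-used read-only, while `dctx` runs each decompression.
+ * Compiled out on purpose; `frames`/`frameSizes` are hypothetical inputs. */
+#if 0
+    ZSTDv07_decompressBegin_usingDict(refDCtx, dict, dictSize);   /* once */
+    for (i = 0; i < nbFrames; i++) {              /* many frames, one dict */
+        size_t const r = ZSTDv07_decompress_usingPreparedDCtx(dctx, refDCtx,
+                             dst, dstCapacity, frames[i], frameSizes[i]);
+        if (ZSTDv07_isError(r)) break;            /* stop on decoding error */
+    }
+#endif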
+
+
+size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
+                                 void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize,
+                                 const void* dict, size_t dictSize)
+{
+    ZSTDv07_decompressBegin_usingDict(dctx, dict, dictSize);
+    ZSTDv07_checkContinuity(dctx, dst);
+    return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
+}
+
+
+size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    return ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
+}
+
+
+size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+#if defined(ZSTDv07_HEAPMODE) && (ZSTDv07_HEAPMODE==1)
+    size_t regenSize;
+    ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
+    if (dctx==NULL) return ERROR(memory_allocation);
+    regenSize = ZSTDv07_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
+    ZSTDv07_freeDCtx(dctx);
+    return regenSize;
+#else   /* stack mode */
+    ZSTDv07_DCtx dctx;
+    return ZSTDv07_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
+#endif
+}
+
+size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t srcSize)
+{
+    const BYTE* ip = (const BYTE*)src;
+    size_t remainingSize = srcSize;
+
+    /* check */
+    if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+
+    /* Frame Header */
+    {   size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
+        if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
+        if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) return ERROR(prefix_unknown);
+        if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
+        ip += frameHeaderSize; remainingSize -= frameHeaderSize;
+    }
+
+    /* Loop on each block */
+    while (1) {
+        blockProperties_t blockProperties;
+        size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
+        if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
+
+        ip += ZSTDv07_blockHeaderSize;
+        remainingSize -= ZSTDv07_blockHeaderSize;
+
+        if (blockProperties.blockType == bt_end) break;
+
+        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
+
+        ip += cBlockSize;
+        remainingSize -= cBlockSize;
+    }
+
+    return ip - (const BYTE*)src;
+}
+
+/*_******************************
+*  Streaming Decompression API
+********************************/
+size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx)
+{
+    return dctx->expected;
+}
+
+int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx)
+{
+    return dctx->stage == ZSTDds_skipFrame;
+}
+
+/** ZSTDv07_decompressContinue() :
+*   @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
+*             or an error code, which can be tested using ZSTDv07_isError() */
+size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    /* Sanity check */
+    if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
+    if (dstCapacity) ZSTDv07_checkContinuity(dctx, dst);
+
+    switch (dctx->stage)
+    {
+    case ZSTDds_getFrameHeaderSize :
+        if (srcSize != ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);   /* impossible */
+        if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
+            memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
+            dctx->expected = ZSTDv07_skippableHeaderSize - ZSTDv07_frameHeaderSize_min; /* magic number + skippable frame length */
+            dctx->stage = ZSTDds_decodeSkippableHeader;
+            return 0;
+        }
+        dctx->headerSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
+        if (ZSTDv07_isError(dctx->headerSize)) return dctx->headerSize;
+        memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
+        if (dctx->headerSize > ZSTDv07_frameHeaderSize_min) {
+            dctx->expected = dctx->headerSize - ZSTDv07_frameHeaderSize_min;
+            dctx->stage = ZSTDds_decodeFrameHeader;
+            return 0;
+        }
+        dctx->expected = 0;   /* not necessary to copy more */
+        /* fall-through */
+    case ZSTDds_decodeFrameHeader:
+        {   size_t result;
+            memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
+            result = ZSTDv07_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
+            if (ZSTDv07_isError(result)) return result;
+            dctx->expected = ZSTDv07_blockHeaderSize;
+            dctx->stage = ZSTDds_decodeBlockHeader;
+            return 0;
+        }
+    case ZSTDds_decodeBlockHeader:
+        {   blockProperties_t bp;
+            size_t const cBlockSize = ZSTDv07_getcBlockSize(src, ZSTDv07_blockHeaderSize, &bp);
+            if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
+            if (bp.blockType == bt_end) {
+                if (dctx->fParams.checksumFlag) {
+                    U64 const h64 = XXH64_digest(&dctx->xxhState);
+                    U32 const h32 = (U32)(h64>>11) & ((1<<22)-1);
+                    const BYTE* const ip = (const BYTE*)src;
+                    U32 const check32 = ip[2] + (ip[1] << 8) + ((ip[0] & 0x3F) << 16);
+                    if (check32 != h32) return ERROR(checksum_wrong);
+                }
+                dctx->expected = 0;
+                dctx->stage = ZSTDds_getFrameHeaderSize;
+            } else {
+                dctx->expected = cBlockSize;
+                dctx->bType = bp.blockType;
+                dctx->stage = ZSTDds_decompressBlock;
+            }
+            return 0;
+        }
+    case ZSTDds_decompressBlock:
+        {   size_t rSize;
+            switch(dctx->bType)
+            {
+            case bt_compressed:
+                rSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
+                break;
+            case bt_raw :
+                rSize = ZSTDv07_copyRawBlock(dst, dstCapacity, src, srcSize);
+                break;
+            case bt_rle :
+                return ERROR(GENERIC);   /* not yet handled */
+                break;
+            case bt_end :   /* should never happen (filtered at phase 1) */
+                rSize = 0;
+                break;
+            default:
+                return ERROR(GENERIC);   /* impossible */
+            }
+            dctx->stage = ZSTDds_decodeBlockHeader;
+            dctx->expected = ZSTDv07_blockHeaderSize;
+            dctx->previousDstEnd = (char*)dst + rSize;
+            if (ZSTDv07_isError(rSize)) return rSize;
+            if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
+            return rSize;
+        }
+    case ZSTDds_decodeSkippableHeader:
+        {   memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
+            dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
+            dctx->stage = ZSTDds_skipFrame;
+            return 0;
+        }
+    case ZSTDds_skipFrame:
+        {   dctx->expected = 0;
+            dctx->stage = ZSTDds_getFrameHeaderSize;
+            return 0;
+        }
+    default:
+        return ERROR(GENERIC);   /* impossible */
+    }
+}
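+
+/* Illustrative sketch, not part of the original source : how a caller drives
+ * the staged decoder above, always feeding exactly the nb of bytes announced
+ * by ZSTDv07_nextSrcSizeToDecompress(). Compiled out on purpose; `readExact`
+ * is a hypothetical helper filling `inBuff` with exactly `n` bytes, and
+ * skippable frames are ignored for brevity. */
+#if 0
+static size_t exampleDecompressLoop(ZSTDv07_DCtx* dctx,
+                                    char* outBuff, size_t outCapacity)
+{
+    char inBuff[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX];   /* each step fits a block */
+    size_t total = 0;
+    size_t toRead;
+    ZSTDv07_decompressBegin(dctx);                /* reset tracking state */
+    while ((toRead = ZSTDv07_nextSrcSizeToDecompress(dctx)) != 0) {
+        readExact(inBuff, toRead);                /* hypothetical I/O */
+        {   size_t const gen = ZSTDv07_decompressContinue(dctx,
+                                   outBuff + total, outCapacity - total,
+                                   inBuff, toRead);
+            if (ZSTDv07_isError(gen)) return gen; /* pass error code up */
+            total += gen;                         /* header steps generate 0 */
+    }   }
+    return total;                                 /* regenerated size */
+}
+#endif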
+
+
+static size_t ZSTDv07_refDictContent(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    dctx->dictEnd = dctx->previousDstEnd;
+    dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
+    dctx->base = dict;
+    dctx->previousDstEnd = (const char*)dict + dictSize;
+    return 0;
+}
+
+static size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, size_t const dictSize)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+
+    {   size_t const hSize = HUFv07_readDTableX4(dctx->hufTable, dict, dictSize);
+        if (HUFv07_isError(hSize)) return ERROR(dictionary_corrupted);
+        dictPtr += hSize;
+    }
+
+    {   short offcodeNCount[MaxOff+1];
+        U32 offcodeMaxValue=MaxOff, offcodeLog;
+        size_t const offcodeHeaderSize = FSEv07_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+        if (FSEv07_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+        if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+        { size_t const errorCode = FSEv07_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
+          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
+        dictPtr += offcodeHeaderSize;
+    }
+
+    {   short matchlengthNCount[MaxML+1];
+        unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+        size_t const matchlengthHeaderSize = FSEv07_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSEv07_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+        { size_t const errorCode = FSEv07_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
+          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
+        dictPtr += matchlengthHeaderSize;
+    }
+
+    {   short litlengthNCount[MaxLL+1];
+        unsigned litlengthMaxValue = MaxLL, litlengthLog;
+        size_t const litlengthHeaderSize = FSEv07_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+        if (FSEv07_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+        if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+        { size_t const errorCode = FSEv07_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
+          if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
+        dictPtr += litlengthHeaderSize;
+    }
+
+    if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+    dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+    dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+    dictPtr += 12;
+
+    dctx->litEntropy = dctx->fseEntropy = 1;
+    return dictPtr - (const BYTE*)dict;
+}
+
+static size_t ZSTDv07_decompress_insertDictionary(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    if (dictSize < 8) return ZSTDv07_refDictContent(dctx, dict, dictSize);
+    {   U32 const magic = MEM_readLE32(dict);
+        if (magic != ZSTDv07_DICT_MAGIC) {
+            return ZSTDv07_refDictContent(dctx, dict, dictSize);   /* pure content mode */
+    }   }
+    dctx->dictID = MEM_readLE32((const char*)dict + 4);
+
+    /* load entropy tables */
+    dict = (const char*)dict + 8;
+    dictSize -= 8;
+    {   size_t const eSize = ZSTDv07_loadEntropy(dctx, dict, dictSize);
+        if (ZSTDv07_isError(eSize)) return ERROR(dictionary_corrupted);
+        dict = (const char*)dict + eSize;
+        dictSize -= eSize;
+    }
+
+    /* reference dictionary content */
+    return ZSTDv07_refDictContent(dctx, dict, dictSize);
+}
+
+
+size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
+{
+    { size_t const errorCode = ZSTDv07_decompressBegin(dctx);
+      if (ZSTDv07_isError(errorCode)) return errorCode; }
+
+    if (dict && dictSize) {
+        size_t const errorCode = ZSTDv07_decompress_insertDictionary(dctx, dict, dictSize);
+        if (ZSTDv07_isError(errorCode)) return ERROR(dictionary_corrupted);
+    }
+
+    return 0;
+}
+
+
+struct ZSTDv07_DDict_s {
+    void* dict;
+    size_t dictSize;
+    ZSTDv07_DCtx* refContext;
+};  /* typedef'd to ZSTDv07_DDict within zstd.h */
+
+ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)
+{
+    if (!customMem.customAlloc && !customMem.customFree)
+        customMem = defaultCustomMem;
+
+    if (!customMem.customAlloc || !customMem.customFree)
+        return NULL;
+
+    {   ZSTDv07_DDict* const ddict = (ZSTDv07_DDict*) customMem.customAlloc(customMem.opaque, sizeof(*ddict));
+        void* const dictContent = customMem.customAlloc(customMem.opaque, dictSize);
+        ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx_advanced(customMem);
+
+        if (!dictContent || !ddict || !dctx) {
+            customMem.customFree(customMem.opaque, dictContent);
+            customMem.customFree(customMem.opaque, ddict);
+            customMem.customFree(customMem.opaque, dctx);
+            return NULL;
+        }
+
+        memcpy(dictContent, dict, dictSize);
+        {   size_t const errorCode = ZSTDv07_decompressBegin_usingDict(dctx, dictContent, dictSize);
+            if (ZSTDv07_isError(errorCode)) {
+                customMem.customFree(customMem.opaque, dictContent);
+                customMem.customFree(customMem.opaque, ddict);
+                customMem.customFree(customMem.opaque, dctx);
+                return NULL;
+        }   }
+
+        ddict->dict = dictContent;
+        ddict->dictSize = dictSize;
+        ddict->refContext = dctx;
+        return ddict;
+    }
+}
+
+/*! ZSTDv07_createDDict() :
+*   Create a digested dictionary, ready to start decompression without startup delay.
+*   `dict` can be released after `ZSTDv07_DDict` creation */
+ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize)
+{
+    ZSTDv07_customMem const allocator = { NULL, NULL, NULL };
+    return ZSTDv07_createDDict_advanced(dict, dictSize, allocator);
+}
+
+size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict)
+{
+    ZSTDv07_freeFunction const cFree = ddict->refContext->customMem.customFree;
+    void* const opaque = ddict->refContext->customMem.opaque;
+    ZSTDv07_freeDCtx(ddict->refContext);
+    cFree(opaque, ddict->dict);
+    cFree(opaque, ddict);
+    return 0;
+}
+
+/*! ZSTDv07_decompress_usingDDict() :
+*   Decompression using a pre-digested Dictionary
+*   Use dictionary without significant overhead. */
+ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const ZSTDv07_DDict* ddict)
+{
+    return ZSTDv07_decompress_usingPreparedDCtx(dctx, ddict->refContext,
+                                           dst, dstCapacity,
+                                           src, srcSize);
+}
+/*
+    Buffered version of Zstd compression library
+    Copyright (C) 2015-2016, Yann Collet.
+
+    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are
+    met:
+    * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following disclaimer
+    in the documentation and/or other materials provided with the
+    distribution.
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+    You can contact the author at :
+    - zstd homepage : http://www.zstd.net/
+*/
+
+
+
+/*-***************************************************************************
+*  Streaming decompression howto
+*
+*  A ZBUFFv07_DCtx object is required to track streaming operations.
+*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
+*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
+*   or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
+*  Note that ZBUFFv07_DCtx objects can be re-initialized multiple times.
+*
+*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+*  The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
+*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
+*            or 0 when a frame is completely decoded,
+*            or an error code, which can be tested using ZBUFFv07_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
+*  output : ZBUFFv07_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
+*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
+
+typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
+               ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv07_dStage;
+
+/* *** Resource management *** */
+struct ZBUFFv07_DCtx_s {
+    ZSTDv07_DCtx* zd;
+    ZSTDv07_frameParams fParams;
+    ZBUFFv07_dStage stage;
+    char*  inBuff;
+    size_t inBuffSize;
+    size_t inPos;
+    char*  outBuff;
+    size_t outBuffSize;
+    size_t outStart;
+    size_t outEnd;
+    size_t blockSize;
+    BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
+    size_t lhSize;
+    ZSTDv07_customMem customMem;
+};   /* typedef'd to ZBUFFv07_DCtx within "zstd_buffered.h" */
+
+ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem);
+
+ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void)
+{
+    return ZBUFFv07_createDCtx_advanced(defaultCustomMem);
+}
+
+ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem)
+{
+    ZBUFFv07_DCtx* zbd;
+
+    if (!customMem.customAlloc && !customMem.customFree)
+        customMem = defaultCustomMem;
+
+    if (!customMem.customAlloc || !customMem.customFree)
+        return NULL;
+
+    zbd = (ZBUFFv07_DCtx*)customMem.customAlloc(customMem.opaque, sizeof(ZBUFFv07_DCtx));
+    if (zbd==NULL) return NULL;
+    memset(zbd, 0, sizeof(ZBUFFv07_DCtx));
+    memcpy(&zbd->customMem, &customMem, sizeof(ZSTDv07_customMem));
+    zbd->zd = ZSTDv07_createDCtx_advanced(customMem);
+    if (zbd->zd == NULL) { ZBUFFv07_freeDCtx(zbd); return NULL; }
+    zbd->stage = ZBUFFds_init;
+    return zbd;
+}
+
+size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* zbd)
+{
+    if (zbd==NULL) return 0;   /* support free on null */
+    ZSTDv07_freeDCtx(zbd->zd);
+    if (zbd->inBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
+    if (zbd->outBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
+    zbd->customMem.customFree(zbd->customMem.opaque, zbd);
+    return 0;
+}
+
+
+/* *** Initialization *** */
+
+size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* zbd, const void* dict, size_t dictSize)
+{
+    zbd->stage = ZBUFFds_loadHeader;
+    zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
+    return ZSTDv07_decompressBegin_usingDict(zbd->zd, dict, dictSize);
+}
+
+size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd)
+{
+    return ZBUFFv07_decompressInitDictionary(zbd, NULL, 0);
+}
+
+
+/* internal util function */
+MEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+{
+    size_t const length = MIN(dstCapacity, srcSize);
+    memcpy(dst, src, length);
+    return length;
+}
+
+
+/* *** Decompression *** */
+
+size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
+                                void* dst, size_t* dstCapacityPtr,
+                          const void* src, size_t* srcSizePtr)
+{
+    const char* const istart = (const char*)src;
+    const char* const iend = istart + *srcSizePtr;
+    const char* ip = istart;
+    char* const ostart = (char*)dst;
+    char* const oend = ostart + *dstCapacityPtr;
+    char* op = ostart;
+    U32 notDone = 1;
+
+    while (notDone) {
+        switch(zbd->stage)
+        {
+        case ZBUFFds_init :
+            return ERROR(init_missing);
+
+        case ZBUFFds_loadHeader :
+            {   size_t const hSize = ZSTDv07_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
+                if (ZSTDv07_isError(hSize)) return hSize;
+                if (hSize != 0) {
+                    size_t const toLoad = hSize - zbd->lhSize;   /* if hSize!=0, hSize > zbd->lhSize */
+                    if (toLoad > (size_t)(iend-ip)) {   /* not enough input to load full header */
+                        memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
+                        zbd->lhSize += iend-ip;
+                        *dstCapacityPtr = 0;
+                        return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize;   /* remaining header bytes + next block header */
+                    }
+                    memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
+                    break;
+            }   }
+
+            /* Consume header */
+            {   size_t const h1Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);  /* == ZSTDv07_frameHeaderSize_min */
+                size_t const h1Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
+                if (ZSTDv07_isError(h1Result)) return h1Result;
+                if (h1Size < zbd->lhSize) {   /* long header */
+                    size_t const h2Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+                    size_t const h2Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
+                    if (ZSTDv07_isError(h2Result)) return h2Result;
+            }   }
+
+            zbd->fParams.windowSize = MAX(zbd->fParams.windowSize, 1U << ZSTDv07_WINDOWLOG_ABSOLUTEMIN);
+
+            /* Frame header instructs buffer sizes */
+            {   size_t const blockSize = MIN(zbd->fParams.windowSize, ZSTDv07_BLOCKSIZE_ABSOLUTEMAX);
+                zbd->blockSize = blockSize;
+                if (zbd->inBuffSize < blockSize) {
+                    zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
+                    zbd->inBuffSize = blockSize;
+                    zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);
+                    if (zbd->inBuff == NULL) return ERROR(memory_allocation);
+                }
+                {   size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
+                    if (zbd->outBuffSize < neededOutSize) {
+                        zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
+                        zbd->outBuffSize = neededOutSize;
+                        zbd->outBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, neededOutSize);
+                        if (zbd->outBuff == NULL) return ERROR(memory_allocation);
+            }   }   }
+            zbd->stage = ZBUFFds_read;
+            /* fall-through */
+        case ZBUFFds_read:
+            {   size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+                if (neededInSize==0) {  /* end of frame */
+                    zbd->stage = ZBUFFds_init;
+                    notDone = 0;
+                    break;
+                }
+                if ((size_t)(iend-ip) >= neededInSize) {  /* decode directly from src */
+                    const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
+                    size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
+                        zbd->outBuff + zbd->outStart, (isSkipFrame ? 0 : zbd->outBuffSize - zbd->outStart),
+                        ip, neededInSize);
+                    if (ZSTDv07_isError(decodedSize)) return decodedSize;
+                    ip += neededInSize;
+                    if (!decodedSize && !isSkipFrame) break;   /* this was just a header */
+                    zbd->outEnd = zbd->outStart +  decodedSize;
+                    zbd->stage = ZBUFFds_flush;
+                    break;
+                }
+                if (ip==iend) { notDone = 0; break; }   /* no more input */
+                zbd->stage = ZBUFFds_load;
+            }
+            /* fall-through */
+        case ZBUFFds_load:
+            {   size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+                size_t const toLoad = neededInSize - zbd->inPos;   /* should always be <= remaining space within inBuff */
+                size_t loadedSize;
+                if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected);   /* should never happen */
+                loadedSize = ZBUFFv07_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
+                ip += loadedSize;
+                zbd->inPos += loadedSize;
+                if (loadedSize < toLoad) { notDone = 0; break; }   /* not enough input, wait for more */
+
+                /* decode loaded input */
+                {  const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
+                   size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
+                        zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
+                        zbd->inBuff, neededInSize);
+                    if (ZSTDv07_isError(decodedSize)) return decodedSize;
+                    zbd->inPos = 0;   /* input is consumed */
+                    if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; }   /* this was just a header */
+                    zbd->outEnd = zbd->outStart +  decodedSize;
+                    zbd->stage = ZBUFFds_flush;
+                }
+            }
+            /* fall-through */
+        case ZBUFFds_flush:
+            {   size_t const toFlushSize = zbd->outEnd - zbd->outStart;
+                size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
+                op += flushedSize;
+                zbd->outStart += flushedSize;
+                if (flushedSize == toFlushSize) {
+                    zbd->stage = ZBUFFds_read;
+                    if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
+                        zbd->outStart = zbd->outEnd = 0;
+                    break;
+                }
+                /* cannot flush everything */
+                notDone = 0;
+                break;
+            }
+        default: return ERROR(GENERIC);   /* impossible */
+    }   }
+
+    /* result */
+    *srcSizePtr = ip-istart;
+    *dstCapacityPtr = op-ostart;
+    {   size_t nextSrcSizeHint = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
+        nextSrcSizeHint -= zbd->inPos;   /* already loaded*/
+        return nextSrcSizeHint;
+    }
+}
+
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+size_t ZBUFFv07_recommendedDInSize(void)  { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + ZSTDv07_blockHeaderSize /* block header size*/ ; }
+size_t ZBUFFv07_recommendedDOutSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; }
diff --git a/vendor/github.com/DataDog/zstd/zstd_v07.h b/vendor/github.com/DataDog/zstd/zstd_v07.h
new file mode 100644
index 0000000..6591cd3
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstd_v07.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDv07_H_235446
+#define ZSTDv07_H_235446
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*======  Dependency  ======*/
+#include <stddef.h>   /* size_t */
+
+
+/*======  Export for Windows  ======*/
+/*!
+*  ZSTDv07_DLL_EXPORT :
+*  Enable exporting of functions when building a Windows DLL
+*/
+#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
+#  define ZSTDLIBv07_API __declspec(dllexport)
+#else
+#  define ZSTDLIBv07_API
+#endif
+
+
+/* *************************************
+*  Simple API
+***************************************/
+/*! ZSTDv07_getDecompressedSize() :
+*   @return : decompressed size if known, 0 otherwise.
+       note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause.
+       note 2 : decompressed size could be wrong or intentionally modified !
+                always ensure results fit within application's authorized limits */
+unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);
+
+/*! ZSTDv07_decompress() :
+    `compressedSize` : must be _exact_ size of compressed input, otherwise decompression will fail.
+    `dstCapacity` must be equal or larger than originalSize.
+    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
+              or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
+ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
+                                    const void* src, size_t compressedSize);
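+
+/* Illustrative sketch, not part of the original header : minimal one-shot
+ * usage, assuming the caller knows a sufficient `dstCapacity` (e.g. tracked
+ * out-of-band, or obtained via ZSTDv07_getDecompressedSize()). */
+#if 0
+    size_t const dSize = ZSTDv07_decompress(dstBuff, dstCapacity,
+                                            srcBuff, srcSize);
+    if (ZSTDv07_isError(dSize))
+        handleError(ZSTDv07_getErrorName(dSize));   /* hypothetical handler */
+#endif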
+
+/**
+ZSTDv07_findFrameCompressedSize() : get the compressed size of a ZSTD frame
+    compressedSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
+    return : the number of bytes that would be read to decompress this frame
+             or an errorCode if it fails (which can be tested using ZSTDv07_isError())
+*/
+size_t ZSTDv07_findFrameCompressedSize(const void* src, size_t compressedSize);
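+
+/* Illustrative sketch, not part of the original header : using the reported
+ * frame size to walk a buffer of concatenated frames. */
+#if 0
+    while (srcSize > 0) {
+        size_t const fSize = ZSTDv07_findFrameCompressedSize(src, srcSize);
+        if (ZSTDv07_isError(fSize)) break;        /* malformed input */
+        /* ... decompress the frame at (src, fSize) ... */
+        src = (const char*)src + fSize;
+        srcSize -= fSize;
+    }
+#endif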
+
+/*======  Helper functions  ======*/
+ZSTDLIBv07_API unsigned    ZSTDv07_isError(size_t code);          /*!< tells if a `size_t` function result is an error code */
+ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code);     /*!< provides readable string from an error code */
+
+
+/*-*************************************
+*  Explicit memory management
+***************************************/
+/** Decompression context */
+typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
+ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
+ZSTDLIBv07_API size_t     ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx);      /*!< @return : errorCode */
+
+/** ZSTDv07_decompressDCtx() :
+*   Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
+ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
+/*-************************
+*  Simple dictionary API
+***************************/
+/*! ZSTDv07_decompress_usingDict() :
+*   Decompression using a pre-defined Dictionary content (see dictBuilder).
+*   Dictionary must be identical to the one used during compression.
+*   Note : This function loads the dictionary, resulting in a significant startup time */
+ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
+                                                   void* dst, size_t dstCapacity,
+                                             const void* src, size_t srcSize,
+                                             const void* dict,size_t dictSize);
+
+
+/*-**************************
+*  Advanced Dictionary API
+****************************/
+/*! ZSTDv07_createDDict() :
+*   Create a digested dictionary, ready to start decompression operation without startup delay.
+*   `dict` can be released after creation */
+typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
+ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
+ZSTDLIBv07_API size_t      ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);
+
+/*! ZSTDv07_decompress_usingDDict() :
+*   Decompression using a pre-digested Dictionary
+*   Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. */
+ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
+                                                    void* dst, size_t dstCapacity,
+                                              const void* src, size_t srcSize,
+                                              const ZSTDv07_DDict* ddict);
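+
+/* Illustrative sketch, not part of the original header : one digested
+ * dictionary shared across many frames. `frames[i]`/`cSizes[i]` are
+ * hypothetical inputs that were all compressed with the same dictionary. */
+#if 0
+    ZSTDv07_DDict* const ddict = ZSTDv07_createDDict(dictBuff, dictSize);
+    ZSTDv07_DCtx*  const dctx  = ZSTDv07_createDCtx();
+    for (i = 0; i < nbFrames; i++) {
+        size_t const r = ZSTDv07_decompress_usingDDict(dctx, dst, dstCapacity,
+                                             frames[i], cSizes[i], ddict);
+        if (ZSTDv07_isError(r)) break;            /* stop on decoding error */
+    }
+    ZSTDv07_freeDCtx(dctx);
+    ZSTDv07_freeDDict(ddict);                     /* release digested dict */
+#endif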
+
+typedef struct {
+    unsigned long long frameContentSize;
+    unsigned windowSize;
+    unsigned dictID;
+    unsigned checksumFlag;
+} ZSTDv07_frameParams;
+
+ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input */
+
+
+
+
+/* *************************************
+*  Streaming functions
+***************************************/
+typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
+ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
+ZSTDLIBv07_API size_t      ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);
+
+ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
+ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);
+
+ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
+                                            void* dst, size_t* dstCapacityPtr,
+                                      const void* src, size_t* srcSizePtr);
+
+/*-***************************************************************************
+*  Streaming decompression howto
+*
+*  A ZBUFFv07_DCtx object is required to track streaming operations.
+*  Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
+*  Use ZBUFFv07_decompressInit() to start a new decompression operation,
+*   or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
+*  Note that ZBUFFv07_DCtx objects can be re-initialized multiple times.
+*
+*  Use ZBUFFv07_decompressContinue() repetitively to consume your input.
+*  *srcSizePtr and *dstCapacityPtr can be any size.
+*  The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
+*  Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
+*  The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
+*  @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
+*            or 0 when a frame is completely decoded,
+*            or an error code, which can be tested using ZBUFFv07_isError().
+*
+*  Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
+*  output : ZBUFFv07_recommendedDOutSize == 128 KB; the block size is the internal unit, which ensures it's always possible to write a full block when decoded.
+*  input  : ZBUFFv07_recommendedDInSize == 128 KB + 3;
+*           just follow the indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
+* *******************************************************************************/
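+
+/* Illustrative sketch, not part of the original header : a complete
+ * streaming-decompression loop over the API above. `readSome` and `writeAll`
+ * are hypothetical caller-side I/O helpers; buffers use the recommended
+ * sizes. Compiled out on purpose. */
+#if 0
+    ZBUFFv07_DCtx* const zbd = ZBUFFv07_createDCtx();
+    size_t hint = ZBUFFv07_decompressInit(zbd);
+    while (hint != 0 && !ZBUFFv07_isError(hint)) {     /* 0 => frame decoded */
+        size_t readSize = readSome(inBuff, ZBUFFv07_recommendedDInSize());
+        const char* ip = inBuff;
+        size_t srcLeft = readSize;
+        if (readSize == 0) break;                      /* input exhausted */
+        while (srcLeft > 0) {          /* re-present input not yet consumed */
+            size_t srcUsed = srcLeft;
+            size_t dstUsed = ZBUFFv07_recommendedDOutSize();
+            hint = ZBUFFv07_decompressContinue(zbd, outBuff, &dstUsed,
+                                               ip, &srcUsed);
+            if (ZBUFFv07_isError(hint)) break;
+            writeAll(outBuff, dstUsed);                /* flush decoded bytes */
+            ip += srcUsed;
+            srcLeft -= srcUsed;
+        }
+    }
+    ZBUFFv07_freeDCtx(zbd);
+#endif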
+
+
+/* *************************************
+*  Tool functions
+***************************************/
+ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
+ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);
+
+/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
+*   These sizes are just hints; respecting them tends to improve latency */
+ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
+ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define ZSTDv07_MAGICNUMBER            0xFD2FB527   /* v0.7 */
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif  /* ZSTDv07_H_235446 */
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.c b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
new file mode 100755
index 0000000..c7a205d
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstdmt_compress.c
@@ -0,0 +1,1831 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ======   Tuning parameters   ====== */
+#define ZSTDMT_NBWORKERS_MAX 200
+#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (2 GB))  /* note : limited by `jobSize` type, which is `unsigned` */
+#define ZSTDMT_OVERLAPLOG_DEFAULT 6
+
+
+/* ======   Compiler specifics   ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+/* ======   Dependencies   ====== */
+#include <string.h>      /* memcpy, memset */
+#include <limits.h>      /* INT_MAX */
+#include "pool.h"        /* threadpool */
+#include "threading.h"   /* mutex */
+#include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
+#include "zstd_ldm.h"
+#include "zstdmt_compress.h"
+
+/* Guards code to support resizing the SeqPool.
+ * We will want to resize the SeqPool to save memory in the future.
+ * Until then, the code is compiled out since it is unused.
+ */
+#define ZSTD_RESIZE_SEQPOOL 0
+
+/* ======   Debug   ====== */
+#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
+
+#  include <stdio.h>
+#  include <unistd.h>
+#  include <sys/times.h>
+#  define DEBUGLOGRAW(l, ...) if (l<=ZSTD_DEBUG) { fprintf(stderr, __VA_ARGS__); }
+
+#  define DEBUG_PRINTHEX(l,p,n) {            \
+    unsigned debug_u;                        \
+    for (debug_u=0; debug_u<(n); debug_u++)  \
+        DEBUGLOGRAW(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
+    DEBUGLOGRAW(l, " \n");                   \
+}
+
+static unsigned long long GetCurrentClockTimeMicroseconds(void)
+{
+   static clock_t _ticksPerSecond = 0;
+   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
+
+   { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
+     return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); }
+}
+
+#define MUTEX_WAIT_TIME_DLEVEL 6
+#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \
+    if (ZSTD_DEBUG >= MUTEX_WAIT_TIME_DLEVEL) {   \
+        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
+        ZSTD_pthread_mutex_lock(mutex);           \
+        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
+            unsigned long long const elapsedTime = (afterTime-beforeTime); \
+            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
+                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
+                   elapsedTime, #mutex);          \
+        }   }                                     \
+    } else {                                      \
+        ZSTD_pthread_mutex_lock(mutex);           \
+    }                                             \
+}
+
+#else
+
+#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
+#  define DEBUG_PRINTHEX(l,p,n) {}
+
+#endif
+
+
+/* =====   Buffer Pool   ===== */
+/* a single Buffer Pool can be invoked from multiple threads in parallel */
+
+typedef struct buffer_s {
+    void* start;
+    size_t capacity;
+} buffer_t;
+
+static const buffer_t g_nullBuffer = { NULL, 0 };
+
+typedef struct ZSTDMT_bufferPool_s {
+    ZSTD_pthread_mutex_t poolMutex;
+    size_t bufferSize;
+    unsigned totalBuffers;
+    unsigned nbBuffers;
+    ZSTD_customMem cMem;
+    buffer_t bTable[1];   /* variable size */
+} ZSTDMT_bufferPool;
+
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+    unsigned const maxNbBuffers = 2*nbWorkers + 3;
+    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
+        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
+    if (bufPool==NULL) return NULL;
+    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
+        ZSTD_free(bufPool, cMem);
+        return NULL;
+    }
+    bufPool->bufferSize = 64 KB;
+    bufPool->totalBuffers = maxNbBuffers;
+    bufPool->nbBuffers = 0;
+    bufPool->cMem = cMem;
+    return bufPool;
+}
+
+static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
+{
+    unsigned u;
+    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
+    if (!bufPool) return;   /* compatibility with free on NULL */
+    for (u=0; u<bufPool->totalBuffers; u++) {
+        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
+        ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
+    }
+    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
+    ZSTD_free(bufPool, bufPool->cMem);
+}
+
+/* only works at initialization, not during compression */
+static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
+{
+    size_t const poolSize = sizeof(*bufPool)
+                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
+    unsigned u;
+    size_t totalBufferSize = 0;
+    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+    for (u=0; u<bufPool->totalBuffers; u++)
+        totalBufferSize += bufPool->bTable[u].capacity;
+    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+
+    return poolSize + totalBufferSize;
+}
+
+/* ZSTDMT_setBufferSize() :
+ * all future buffers provided by this buffer pool will have _at least_ this size
+ * note : it's better for all buffers to have same size,
+ * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */
+static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
+{
+    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
+    bufPool->bufferSize = bSize;
+    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+}
+
+/** ZSTDMT_getBuffer() :
+ *  assumption : bufPool must be valid
+ * @return : a buffer, with start pointer and size
+ *  note: allocation may fail, in this case, start==NULL and size==0 */
+static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
+{
+    size_t const bSize = bufPool->bufferSize;
+    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
+    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
+        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
+        size_t const availBufferSize = buf.capacity;
+        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
+        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
+            /* large enough, but not too much */
+            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
+                        bufPool->nbBuffers, (U32)buf.capacity);
+            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+            return buf;
+        }
+        /* size conditions not respected : scratch this buffer, create new one */
+        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
+        ZSTD_free(buf.start, bufPool->cMem);
+    }
+    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+    /* create new buffer */
+    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
+    {   buffer_t buffer;
+        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
+        buffer.capacity = (start==NULL) ? 0 : bSize;
+        if (start==NULL) {
+            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
+        } else {
+            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
+        }
+        return buffer;
+    }
+}
+
+#if ZSTD_RESIZE_SEQPOOL
+/** ZSTDMT_resizeBuffer() :
+ * assumption : bufPool must be valid
+ * @return : a buffer that is at least the buffer pool buffer size.
+ *           If a reallocation happens, the data in the input buffer is copied.
+ */
+static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
+{
+    size_t const bSize = bufPool->bufferSize;
+    if (buffer.capacity < bSize) {
+        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+        buffer_t newBuffer;
+        newBuffer.start = start;
+        newBuffer.capacity = start == NULL ? 0 : bSize;
+        if (start != NULL) {
+            assert(newBuffer.capacity >= buffer.capacity);
+            memcpy(newBuffer.start, buffer.start, buffer.capacity);
+            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
+            return newBuffer;
+        }
+        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
+    }
+    return buffer;
+}
+#endif
+
+/* store buffer for later re-use, up to pool capacity */
+static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
+{
+    if (buf.start == NULL) return;   /* compatible with release on NULL */
+    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
+    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
+    if (bufPool->nbBuffers < bufPool->totalBuffers) {
+        bufPool->bTable[bufPool->nbBuffers++] = buf;  /* stored for later use */
+        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
+                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
+        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+        return;
+    }
+    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
+    /* Reached bufferPool capacity (should not happen) */
+    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
+    ZSTD_free(buf.start, bufPool->cMem);
+}
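+
+/* Illustrative sketch, not part of the original source : the borrow/return
+ * discipline the pool above expects from worker jobs. */
+#if 0
+    buffer_t const dstBuff = ZSTDMT_getBuffer(bufPool);  /* borrow or allocate */
+    if (dstBuff.start == NULL) return ERROR(memory_allocation);
+    /* ... write up to dstBuff.capacity bytes into dstBuff.start ... */
+    ZSTDMT_releaseBuffer(bufPool, dstBuff);   /* store for later re-use */
+#endif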
+
+
+/* =====   Seq Pool Wrapper   ====== */
+
+static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
+
+typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
+
+static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
+{
+    return ZSTDMT_sizeof_bufferPool(seqPool);
+}
+
+static rawSeqStore_t bufferToSeq(buffer_t buffer)
+{
+    rawSeqStore_t seq = {NULL, 0, 0, 0};
+    seq.seq = (rawSeq*)buffer.start;
+    seq.capacity = buffer.capacity / sizeof(rawSeq);
+    return seq;
+}
+
+static buffer_t seqToBuffer(rawSeqStore_t seq)
+{
+    buffer_t buffer;
+    buffer.start = seq.seq;
+    buffer.capacity = seq.capacity * sizeof(rawSeq);
+    return buffer;
+}
+
+static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
+{
+    if (seqPool->bufferSize == 0) {
+        return kNullRawSeqStore;
+    }
+    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
+}
+
+#if ZSTD_RESIZE_SEQPOOL
+static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+  return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
+}
+#endif
+
+static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+  ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
+}
+
+static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
+{
+  ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
+}
+
+static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+    ZSTDMT_seqPool* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    ZSTDMT_setNbSeq(seqPool, 0);
+    return seqPool;
+}
+
+static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
+{
+    ZSTDMT_freeBufferPool(seqPool);
+}
+
+
+
+/* =====   CCtx Pool   ===== */
+/* a single CCtx Pool can be invoked from multiple threads in parallel */
+
+typedef struct {
+    ZSTD_pthread_mutex_t poolMutex;
+    unsigned totalCCtx;
+    unsigned availCCtx;
+    ZSTD_customMem cMem;
+    ZSTD_CCtx* cctx[1];   /* variable size */
+} ZSTDMT_CCtxPool;
+
+/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
+static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
+{
+    unsigned u;
+    for (u=0; u<pool->totalCCtx; u++)
+        ZSTD_freeCCtx(pool->cctx[u]);  /* note : compatible with free on NULL */
+    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
+    ZSTD_free(pool, pool->cMem);
+}
+
+/* ZSTDMT_createCCtxPool() :
+ * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbWorkers,
+                                              ZSTD_customMem cMem)
+{
+    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
+        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
+    assert(nbWorkers > 0);
+    if (!cctxPool) return NULL;
+    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
+        ZSTD_free(cctxPool, cMem);
+        return NULL;
+    }
+    cctxPool->cMem = cMem;
+    cctxPool->totalCCtx = nbWorkers;
+    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
+    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
+    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
+    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
+    return cctxPool;
+}
+
+/* only works during initialization phase, not during compression */
+static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
+{
+    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
+    {   unsigned const nbWorkers = cctxPool->totalCCtx;
+        size_t const poolSize = sizeof(*cctxPool)
+                                + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
+        unsigned u;
+        size_t totalCCtxSize = 0;
+        for (u=0; u<nbWorkers; u++) {
+            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
+        }
+        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+        assert(nbWorkers > 0);
+        return poolSize + totalCCtxSize;
+    }
+}
+
+static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
+{
+    DEBUGLOG(5, "ZSTDMT_getCCtx");
+    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
+    if (cctxPool->availCCtx) {
+        cctxPool->availCCtx--;
+        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
+            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+            return cctx;
+    }   }
+    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+    DEBUGLOG(5, "create one more CCtx");
+    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
+}
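+
+/* note : when the pool is empty, ZSTDMT_getCCtx() creates an extra CCtx on the fly;
+ * ZSTDMT_releaseCCtx() re-absorbs it when a slot is free, and frees it otherwise,
+ * so the pool never holds more than totalCCtx contexts at rest. */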
+
+static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
+{
+    if (cctx==NULL) return;   /* compatibility with release on NULL */
+    ZSTD_pthread_mutex_lock(&pool->poolMutex);
+    if (pool->availCCtx < pool->totalCCtx)
+        pool->cctx[pool->availCCtx++] = cctx;
+    else {
+        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
+        DEBUGLOG(4, "CCtx pool overflow : free cctx");
+        ZSTD_freeCCtx(cctx);
+    }
+    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
+}
+
+/* ====   Serial State   ==== */
+
+typedef struct {
+    void const* start;
+    size_t size;
+} range_t;
+
+typedef struct {
+    /* All variables in the struct are protected by mutex. */
+    ZSTD_pthread_mutex_t mutex;
+    ZSTD_pthread_cond_t cond;
+    ZSTD_CCtx_params params;
+    ldmState_t ldmState;
+    XXH64_state_t xxhState;
+    unsigned nextJobID;
+    /* Protects ldmWindow.
+     * Must be acquired after the main mutex when acquiring both.
+     */
+    ZSTD_pthread_mutex_t ldmWindowMutex;
+    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
+    ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
+} serialState_t;
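+/* note : the serial state covers the two steps that must be performed in job order :
+ * XXH64 checksum update and LDM sequence generation, which both consume input
+ * sequentially. Workers block on `cond` until nextJobID reaches their own jobID. */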
+
+static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params)
+{
+    /* Adjust parameters */
+    if (params.ldmParams.enableLdm) {
+        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
+        params.ldmParams.windowLog = params.cParams.windowLog;
+        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+        assert(params.ldmParams.hashEveryLog < 32);
+        serialState->ldmState.hashPower =
+                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+    } else {
+        memset(&params.ldmParams, 0, sizeof(params.ldmParams));
+    }
+    serialState->nextJobID = 0;
+    if (params.fParams.checksumFlag)
+        XXH64_reset(&serialState->xxhState, 0);
+    if (params.ldmParams.enableLdm) {
+        ZSTD_customMem cMem = params.customMem;
+        unsigned const hashLog = params.ldmParams.hashLog;
+        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
+        unsigned const bucketLog =
+            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
+        size_t const bucketSize = (size_t)1 << bucketLog;
+        unsigned const prevBucketLog =
+            serialState->params.ldmParams.hashLog -
+            serialState->params.ldmParams.bucketSizeLog;
+        /* Size the seq pool tables */
+        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, params.jobSize));
+        /* Reset the window */
+        ZSTD_window_clear(&serialState->ldmState.window);
+        serialState->ldmWindow = serialState->ldmState.window;
+        /* Resize tables and output space if necessary. */
+        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
+            ZSTD_free(serialState->ldmState.hashTable, cMem);
+            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
+        }
+        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
+            ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
+        }
+        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
+            return 1;
+        /* Zero the tables */
+        memset(serialState->ldmState.hashTable, 0, hashSize);
+        memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
+    }
+    serialState->params = params;
+    return 0;
+}
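+/* note : a non-zero return from ZSTDMT_serialState_reset() means an allocation
+ * failure for the LDM tables; callers translate it into ERROR(memory_allocation). */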
+
+static int ZSTDMT_serialState_init(serialState_t* serialState)
+{
+    int initError = 0;
+    memset(serialState, 0, sizeof(*serialState));
+    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
+    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
+    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
+    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
+    return initError;
+}
+
+static void ZSTDMT_serialState_free(serialState_t* serialState)
+{
+    ZSTD_customMem cMem = serialState->params.customMem;
+    ZSTD_pthread_mutex_destroy(&serialState->mutex);
+    ZSTD_pthread_cond_destroy(&serialState->cond);
+    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
+    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
+    ZSTD_free(serialState->ldmState.hashTable, cMem);
+    ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+}
+
+static void ZSTDMT_serialState_update(serialState_t* serialState,
+                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
+                                      range_t src, unsigned jobID)
+{
+    /* Wait for our turn */
+    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+    while (serialState->nextJobID < jobID) {
+        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
+    }
+    /* A future job may error and skip our job */
+    if (serialState->nextJobID == jobID) {
+        /* It is now our turn, do any processing necessary */
+        if (serialState->params.ldmParams.enableLdm) {
+            size_t error;
+            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
+                   seqStore.size == 0 && seqStore.capacity > 0);
+            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
+            error = ZSTD_ldm_generateSequences(
+                &serialState->ldmState, &seqStore,
+                &serialState->params.ldmParams, src.start, src.size);
+            /* We provide a large enough buffer to never fail. */
+            assert(!ZSTD_isError(error)); (void)error;
+            /* Update ldmWindow to match the ldmState.window and signal the main
+             * thread if it is waiting for a buffer.
+             */
+            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+            serialState->ldmWindow = serialState->ldmState.window;
+            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+        }
+        if (serialState->params.fParams.checksumFlag && src.size > 0)
+            XXH64_update(&serialState->xxhState, src.start, src.size);
+    }
+    /* Now it is the next job's turn */
+    serialState->nextJobID++;
+    ZSTD_pthread_cond_broadcast(&serialState->cond);
+    ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+    if (seqStore.size > 0) {
+        size_t const err = ZSTD_referenceExternalSequences(
+            jobCCtx, seqStore.seq, seqStore.size);
+        assert(serialState->params.ldmParams.enableLdm);
+        assert(!ZSTD_isError(err));
+        (void)err;
+    }
+}
+
+static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
+                                              unsigned jobID, size_t cSize)
+{
+    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+    if (serialState->nextJobID <= jobID) {
+        assert(ZSTD_isError(cSize)); (void)cSize;
+        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
+        serialState->nextJobID = jobID + 1;
+        ZSTD_pthread_cond_broadcast(&serialState->cond);
+
+        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+        ZSTD_window_clear(&serialState->ldmWindow);
+        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+    }
+    ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+}
+
+
+/* ------------------------------------------ */
+/* =====          Worker thread         ===== */
+/* ------------------------------------------ */
+
+static const range_t kNullRange = { NULL, 0 };
+
+typedef struct {
+    size_t   consumed;                   /* SHARED - zeroed by mtctx, then modified by worker AND read by mtctx */
+    size_t   cSize;                      /* SHARED - zeroed by mtctx, then modified by worker AND read by mtctx, then zeroed again by mtctx */
+    ZSTD_pthread_mutex_t job_mutex;      /* Thread-safe - used by mtctx and worker */
+    ZSTD_pthread_cond_t job_cond;        /* Thread-safe - used by mtctx and worker */
+    ZSTDMT_CCtxPool* cctxPool;           /* Thread-safe - used by mtctx and (all) workers */
+    ZSTDMT_bufferPool* bufPool;          /* Thread-safe - used by mtctx and (all) workers */
+    ZSTDMT_seqPool* seqPool;             /* Thread-safe - used by mtctx and (all) workers */
+    serialState_t* serial;               /* Thread-safe - used by mtctx and (all) workers */
+    buffer_t dstBuff;                    /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
+    range_t prefix;                      /* set by mtctx, then read by worker & mtctx => no barrier */
+    range_t src;                         /* set by mtctx, then read by worker & mtctx => no barrier */
+    unsigned jobID;                      /* set by mtctx, then read by worker => no barrier */
+    unsigned firstJob;                   /* set by mtctx, then read by worker => no barrier */
+    unsigned lastJob;                    /* set by mtctx, then read by worker => no barrier */
+    ZSTD_CCtx_params params;             /* set by mtctx, then read by worker => no barrier */
+    const ZSTD_CDict* cdict;             /* set by mtctx, then read by worker => no barrier */
+    unsigned long long fullFrameSize;    /* set by mtctx, then read by worker => no barrier */
+    size_t   dstFlushed;                 /* used only by mtctx */
+    unsigned frameChecksumNeeded;        /* used only by mtctx */
+} ZSTDMT_jobDescription;
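+/* job lifecycle : mtctx fills the descriptor and posts it to the thread pool;
+ * the worker publishes progress through `consumed` and `cSize` under job_mutex,
+ * and signals job_cond whenever more compressed data is ready to be flushed. */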
+
+/* ZSTDMT_compressionJob() is a POOL_function type */
+void ZSTDMT_compressionJob(void* jobDescription)
+{
+    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
+    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
+    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
+    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
+    buffer_t dstBuff = job->dstBuff;
+
+    /* Don't compute the checksum for chunks, since it is computed externally,
+     * but keep the flag set so it gets declared in the frame header.
+     */
+    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
+    /* Don't run LDM for the chunks, since we handle it externally */
+    jobParams.ldmParams.enableLdm = 0;
+
+    /* resources */
+    if (cctx==NULL) {
+        job->cSize = ERROR(memory_allocation);
+        goto _endJob;
+    }
+    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
+        dstBuff = ZSTDMT_getBuffer(job->bufPool);
+        if (dstBuff.start==NULL) {
+            job->cSize = ERROR(memory_allocation);
+            goto _endJob;
+        }
+        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
+    }
+
+    /* init */
+    if (job->cdict) {
+        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, job->cdict, jobParams, job->fullFrameSize);
+        assert(job->firstJob);  /* only allowed for first job */
+        if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
+    } else {  /* srcStart points at reloaded section */
+        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
+        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstJob);
+            if (ZSTD_isError(forceWindowError)) {
+                job->cSize = forceWindowError;
+                goto _endJob;
+        }   }
+        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
+                                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+                                        NULL, /*cdict*/
+                                        jobParams, pledgedSrcSize);
+            if (ZSTD_isError(initError)) {
+                job->cSize = initError;
+                goto _endJob;
+    }   }   }
+
+    /* Perform serial step as early as possible, but after CCtx initialization */
+    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
+
+    if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */
+        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
+        if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
+        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
+        ZSTD_invalidateRepCodes(cctx);
+    }
+
+    /* compress */
+    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
+        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
+        const BYTE* ip = (const BYTE*) job->src.start;
+        BYTE* const ostart = (BYTE*)dstBuff.start;
+        BYTE* op = ostart;
+        BYTE* oend = op + dstBuff.capacity;
+        int chunkNb;
+        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
+        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
+        assert(job->cSize == 0);
+        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
+            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
+            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            ip += chunkSize;
+            op += cSize; assert(op < oend);
+            /* stats */
+            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+            job->cSize += cSize;
+            job->consumed = chunkSize * chunkNb;
+            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
+                        (U32)cSize, (U32)job->cSize);
+            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
+            ZSTD_pthread_mutex_unlock(&job->job_mutex);
+        }
+        /* last block */
+        assert(chunkSize > 0); assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
+        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
+            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
+            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
+            size_t const cSize = (job->lastJob) ?
+                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
+                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
+            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            /* stats */
+            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+            job->cSize += cSize;
+            ZSTD_pthread_mutex_unlock(&job->job_mutex);
+    }   }
+
+_endJob:
+    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
+    if (job->prefix.size > 0)
+        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
+    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
+    /* release resources */
+    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
+    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
+    /* report */
+    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+    job->consumed = job->src.size;
+    ZSTD_pthread_cond_signal(&job->job_cond);
+    ZSTD_pthread_mutex_unlock(&job->job_mutex);
+}
+
+
+/* ------------------------------------------ */
+/* =====   Multi-threaded compression   ===== */
+/* ------------------------------------------ */
+
+typedef struct {
+    range_t prefix;         /* read-only non-owned prefix buffer */
+    buffer_t buffer;
+    size_t filled;
+} inBuff_t;
+
+typedef struct {
+  BYTE* buffer;     /* The round input buffer. All jobs get references
+                     * to pieces of the buffer. ZSTDMT_tryGetInputRange()
+                     * handles handing out job input buffers, and makes
+                     * sure it doesn't overlap with any pieces still in use.
+                     */
+  size_t capacity;  /* The capacity of buffer. */
+  size_t pos;       /* The position of the current inBuff in the round
+                     * buffer. Updated past the end of the inBuff once
+                     * the inBuff is sent to the worker thread.
+                     * pos <= capacity.
+                     */
+} roundBuff_t;
+
+static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
+
+struct ZSTDMT_CCtx_s {
+    POOL_ctx* factory;
+    ZSTDMT_jobDescription* jobs;
+    ZSTDMT_bufferPool* bufPool;
+    ZSTDMT_CCtxPool* cctxPool;
+    ZSTDMT_seqPool* seqPool;
+    ZSTD_CCtx_params params;
+    size_t targetSectionSize;
+    size_t targetPrefixSize;
+    roundBuff_t roundBuff;
+    inBuff_t inBuff;
+    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
+    serialState_t serial;
+    unsigned singleBlockingThread;
+    unsigned jobIDMask;
+    unsigned doneJobID;
+    unsigned nextJobID;
+    unsigned frameEnded;
+    unsigned allJobsCompleted;
+    unsigned long long frameContentSize;
+    unsigned long long consumed;
+    unsigned long long produced;
+    ZSTD_customMem cMem;
+    ZSTD_CDict* cdictLocal;
+    const ZSTD_CDict* cdict;
+};
+
+static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
+{
+    U32 jobNb;
+    if (jobTable == NULL) return;
+    for (jobNb=0; jobNb<nbJobs; jobNb++) {
+        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
+        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
+    }
+    ZSTD_free(jobTable, cMem);
+}
+
+/* ZSTDMT_createJobsTable()
+ * allocate and init a job table.
+ * update *nbJobsPtr to next power of 2 value, as size of table,
+ * so that job IDs can be wrapped with a simple mask (nbJobs-1) */
+static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
+{
+    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
+    U32 const nbJobs = 1 << nbJobsLog2;
+    U32 jobNb;
+    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
+                ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
+    int initError = 0;
+    if (jobTable==NULL) return NULL;
+    *nbJobsPtr = nbJobs;
+    for (jobNb=0; jobNb<nbJobs; jobNb++) {
+        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
+        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
+    }
+    if (initError != 0) {
+        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
+        return NULL;
+    }
+    return jobTable;
+}
+
+/* ZSTDMT_CCtxParam_setNbWorkers():
+ * Internal use only */
+size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
+{
+    if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
+    params->nbWorkers = nbWorkers;
+    params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT;
+    params->jobSize = 0;
+    return nbWorkers;
+}
+
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+    ZSTDMT_CCtx* mtctx;
+    U32 nbJobs = nbWorkers + 2;
+    int initError;
+    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
+
+    if (nbWorkers < 1) return NULL;
+    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
+    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
+        /* invalid custom allocator */
+        return NULL;
+
+    mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
+    if (!mtctx) return NULL;
+    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
+    mtctx->cMem = cMem;
+    mtctx->allJobsCompleted = 1;
+    mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
+    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
+    mtctx->jobIDMask = nbJobs - 1;
+    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
+    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
+    initError = ZSTDMT_serialState_init(&mtctx->serial);
+    mtctx->roundBuff = kNullRoundBuff;
+    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
+        ZSTDMT_freeCCtx(mtctx);
+        return NULL;
+    }
+    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
+    return mtctx;
+}
+
+ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
+{
+    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
+}
+
+
+/* ZSTDMT_releaseAllJobResources() :
+ * note : ensure all workers are killed first ! */
+static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
+{
+    unsigned jobID;
+    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
+    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
+        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
+        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+        mtctx->jobs[jobID].cSize = 0;
+    }
+    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
+    mtctx->inBuff.buffer = g_nullBuffer;
+    mtctx->inBuff.filled = 0;
+    mtctx->allJobsCompleted = 1;
+}
+
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
+{
+    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
+    while (mtctx->doneJobID < mtctx->nextJobID) {
+        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
+        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+            DEBUGLOG(5, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
+            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
+        }
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+        mtctx->doneJobID++;
+    }
+}
+
+size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
+{
+    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
+    POOL_free(mtctx->factory);   /* stop and free worker threads */
+    ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */
+    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+    ZSTDMT_freeBufferPool(mtctx->bufPool);
+    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
+    ZSTDMT_freeSeqPool(mtctx->seqPool);
+    ZSTDMT_serialState_free(&mtctx->serial);
+    ZSTD_freeCDict(mtctx->cdictLocal);
+    if (mtctx->roundBuff.buffer)
+        ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
+    ZSTD_free(mtctx, mtctx->cMem);
+    return 0;
+}
+
+size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
+{
+    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
+    return sizeof(*mtctx)
+            + POOL_sizeof(mtctx->factory)
+            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
+            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
+            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
+            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
+            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
+            + mtctx->roundBuff.capacity;
+}
+
+/* Internal only */
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
+                                ZSTDMT_parameter parameter, unsigned value) {
+    DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
+    switch(parameter)
+    {
+    case ZSTDMT_p_jobSize :
+        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %u", value);
+        if ( (value > 0)  /* value==0 => automatic job size */
+           & (value < ZSTDMT_JOBSIZE_MIN) )
+            value = ZSTDMT_JOBSIZE_MIN;
+        params->jobSize = value;
+        return value;
+    case ZSTDMT_p_overlapSectionLog :
+        if (value > 9) value = 9;   /* value already clamped, no further capping needed */
+        DEBUGLOG(4, "ZSTDMT_p_overlapSectionLog : %u", value);
+        params->overlapSizeLog = value;
+        return value;
+        return value;
+    default :
+        return ERROR(parameter_unsupported);
+    }
+}
+
+size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value)
+{
+    DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
+    switch(parameter)
+    {
+    case ZSTDMT_p_jobSize :
+        return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+    case ZSTDMT_p_overlapSectionLog :
+        return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
+    default :
+        return ERROR(parameter_unsupported);
+    }
+}
+
+/* Sets parameters relevant to the compression job,
+ * initializing others to default values. */
+static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
+{
+    ZSTD_CCtx_params jobParams;
+    memset(&jobParams, 0, sizeof(jobParams));
+
+    jobParams.cParams = params.cParams;
+    jobParams.fParams = params.fParams;
+    jobParams.compressionLevel = params.compressionLevel;
+    jobParams.disableLiteralCompression = params.disableLiteralCompression;
+
+    return jobParams;
+}
+
+/*! ZSTDMT_updateCParams_whileCompressing() :
+ *  Updates only a selected set of compression parameters, to remain compatible with current frame.
+ *  New parameters will be applied to next compression job. */
+void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
+{
+    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
+    int const compressionLevel = cctxParams->compressionLevel;
+    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
+                compressionLevel);
+    mtctx->params.compressionLevel = compressionLevel;
+    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0);
+        cParams.windowLog = saved_wlog;
+        mtctx->params.cParams = cParams;
+    }
+}
+
+/* ZSTDMT_getNbWorkers():
+ * @return nb of workers currently active in mtctx.
+ * mtctx must be valid */
+unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx)
+{
+    assert(mtctx != NULL);
+    return mtctx->params.nbWorkers;
+}
+
+/* ZSTDMT_getFrameProgression():
+ * tells how much data has been consumed (input) and produced (output) for current frame.
+ * It is able to count progression from within worker threads.
+ * Note : mutex will be acquired during statistics collection. */
+ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
+{
+    ZSTD_frameProgression fps;
+    DEBUGLOG(6, "ZSTDMT_getFrameProgression");
+    fps.consumed = mtctx->consumed;
+    fps.produced = mtctx->produced;
+    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
+    {   unsigned jobNb;
+        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
+        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
+                    mtctx->doneJobID, lastJobNb, mtctx->jobReady);
+        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
+            unsigned const wJobID = jobNb & mtctx->jobIDMask;
+            ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
+            {   size_t const cResult = mtctx->jobs[wJobID].cSize;
+                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
+                fps.consumed += mtctx->jobs[wJobID].consumed;
+                fps.ingested += mtctx->jobs[wJobID].src.size;
+                fps.produced += produced;
+            }
+            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+        }
+    }
+    return fps;
+}
+
+
+/* ------------------------------------------ */
+/* =====   Multi-threaded compression   ===== */
+/* ------------------------------------------ */
+
+static size_t ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
+{
+    if (params.ldmParams.enableLdm)
+        return MAX(21, params.cParams.chainLog + 4);
+    return MAX(20, params.cParams.windowLog + 2);
+}
+
+static size_t ZSTDMT_computeOverlapLog(ZSTD_CCtx_params const params)
+{
+    unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog;
+    if (params.ldmParams.enableLdm)
+        return (MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog);
+    return overlapRLog >= 9 ? 0 : (params.cParams.windowLog - overlapRLog);
+}
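+/* note : without LDM, the resulting overlap is windowSize >> (9 - overlapSizeLog) :
+ * e.g. (illustrative values) overlapSizeLog==6 with windowLog==23 gives an
+ * overlap of 1<<20 bytes, i.e. 1/8th of the 8 MB window. */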
+
+static unsigned ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers) {
+    assert(nbWorkers>0);
+    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
+        size_t const jobMaxSize = jobSizeTarget << 2;
+        size_t const passSizeMax = jobMaxSize * nbWorkers;
+        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
+        unsigned const nbJobsLarge = multiplier * nbWorkers;
+        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
+        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
+        return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
+}   }
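+/* worked example (illustrative values) : without LDM, windowLog==23 gives a
+ * job target of 1<<25 (32 MB); with 4 workers and a 100 MB source,
+ * passSizeMax = 4 * 128 MB covers the input in a single pass (multiplier==1),
+ * so the function returns MIN(100/32 + 1, 4) = 4 jobs. */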
+
+/* ZSTDMT_compress_advanced_internal() :
+ * This is a blocking function : it will only give back control to caller after finishing its compression job.
+ */
+static size_t ZSTDMT_compress_advanced_internal(
+                ZSTDMT_CCtx* mtctx,
+                void* dst, size_t dstCapacity,
+          const void* src, size_t srcSize,
+          const ZSTD_CDict* cdict,
+                ZSTD_CCtx_params params)
+{
+    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
+    size_t const overlapSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
+    unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
+    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
+    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block : if jobSize is barely above a multiple of 128 KiB, grow it by ~64 KiB */
+    const char* const srcStart = (const char*)src;
+    size_t remainingSrcSize = srcSize;
+    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */
+    size_t frameStartPos = 0, dstBufferPos = 0;
+    assert(jobParams.nbWorkers == 0);
+    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
+
+    params.jobSize = (U32)avgJobSize;
+    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
+                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
+
+    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
+        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
+        DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
+        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
+        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
+    }
+
+    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
+    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+        return ERROR(memory_allocation);
+
+    if (nbJobs > mtctx->jobIDMask+1) {  /* enlarge job table */
+        U32 jobsTableSize = nbJobs;
+        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
+        mtctx->jobIDMask = 0;
+        mtctx->jobs = ZSTDMT_createJobsTable(&jobsTableSize, mtctx->cMem);
+        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
+        assert((jobsTableSize != 0) && ((jobsTableSize & (jobsTableSize - 1)) == 0));  /* ensure jobsTableSize is a power of 2 */
+        mtctx->jobIDMask = jobsTableSize - 1;
+    }
+
+    {   unsigned u;
+        for (u=0; u<nbJobs; u++) {
+            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
+            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
+            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
+            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
+            size_t dictSize = u ? overlapSize : 0;
+
+            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
+            mtctx->jobs[u].prefix.size = dictSize;
+            mtctx->jobs[u].src.start = srcStart + frameStartPos;
+            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
+            mtctx->jobs[u].consumed = 0;
+            mtctx->jobs[u].cSize = 0;
+            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
+            mtctx->jobs[u].fullFrameSize = srcSize;
+            mtctx->jobs[u].params = jobParams;
+            /* do not calculate checksum within sections, but write it in header for first section */
+            mtctx->jobs[u].dstBuff = dstBuffer;
+            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
+            mtctx->jobs[u].bufPool = mtctx->bufPool;
+            mtctx->jobs[u].seqPool = mtctx->seqPool;
+            mtctx->jobs[u].serial = &mtctx->serial;
+            mtctx->jobs[u].jobID = u;
+            mtctx->jobs[u].firstJob = (u==0);
+            mtctx->jobs[u].lastJob = (u==nbJobs-1);
+
+            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)jobSize);
+            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
+            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
+
+            frameStartPos += jobSize;
+            dstBufferPos += dstBufferCapacity;
+            remainingSrcSize -= jobSize;
+    }   }
+
+    /* collect result */
+    {   size_t error = 0, dstPos = 0;
+        unsigned jobID;
+        for (jobID=0; jobID<nbJobs; jobID++) {
+            DEBUGLOG(5, "waiting for job %u ", jobID);
+            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+                DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
+                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
+            }
+            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+            DEBUGLOG(5, "ready to write job %u ", jobID);
+
+            {   size_t const cSize = mtctx->jobs[jobID].cSize;
+                if (ZSTD_isError(cSize)) error = cSize;
+                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
+                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
+                    if (!error)
+                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
+                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
+                        DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
+                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
+                }   }
+                mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+                mtctx->jobs[jobID].cSize = 0;
+                dstPos += cSize;
+            }
+        }  /* for (jobID=0; jobID<nbJobs; jobID++) */
+
+        DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
+        if (params.fParams.checksumFlag) {
+            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+            if (dstPos + 4 > dstCapacity) {
+                error = ERROR(dstSize_tooSmall);
+            } else {
+                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
+                MEM_writeLE32((char*)dst + dstPos, checksum);
+                dstPos += 4;
+        }   }
+
+        if (!error) DEBUGLOG(4, "compressed size : %u  ", (U32)dstPos);
+        return error ? error : dstPos;
+    }
+}
+
+size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                               void* dst, size_t dstCapacity,
+                         const void* src, size_t srcSize,
+                         const ZSTD_CDict* cdict,
+                               ZSTD_parameters params,
+                               unsigned overlapLog)
+{
+    ZSTD_CCtx_params cctxParams = mtctx->params;
+    cctxParams.cParams = params.cParams;
+    cctxParams.fParams = params.fParams;
+    cctxParams.overlapSizeLog = overlapLog;
+    return ZSTDMT_compress_advanced_internal(mtctx,
+                                             dst, dstCapacity,
+                                             src, srcSize,
+                                             cdict, cctxParams);
+}
+
+
+size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+                           void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize,
+                           int compressionLevel)
+{
+    U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 9 : ZSTDMT_OVERLAPLOG_DEFAULT;
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+    params.fParams.contentSizeFlag = 1;
+    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
+}
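+
+/* usage sketch (hypothetical buffers, error handling elided) :
+ *
+ *   ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);   // 4 worker threads
+ *   size_t const cSize = ZSTDMT_compressCCtx(mtctx,
+ *                            dstBuf, ZSTD_compressBound(srcSize),
+ *                            srcBuf, srcSize, 3);      // compression level 3
+ *   if (ZSTD_isError(cSize)) { ... handle error ... }
+ *   ZSTDMT_freeCCtx(mtctx);
+ */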
+
+
+/* ====================================== */
+/* =======      Streaming API     ======= */
+/* ====================================== */
+
+size_t ZSTDMT_initCStream_internal(
+        ZSTDMT_CCtx* mtctx,
+        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
+        unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u, disableLiteralCompression=%i)",
+                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx, params.disableLiteralCompression);
+    /* params are supposed to be fully validated at this point */
+    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
+    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
+
+    /* init */
+    if (params.jobSize == 0) {
+        params.jobSize = 1U << ZSTDMT_computeTargetJobLog(params);
+    }
+    if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
+
+    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
+    if (mtctx->singleBlockingThread) {
+        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
+        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
+        assert(singleThreadParams.nbWorkers == 0);
+        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
+                                         dict, dictSize, cdict,
+                                         singleThreadParams, pledgedSrcSize);
+    }
+
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
+
+    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
+        ZSTDMT_waitForAllJobsCompleted(mtctx);
+        ZSTDMT_releaseAllJobResources(mtctx);
+        mtctx->allJobsCompleted = 1;
+    }
+
+    mtctx->params = params;
+    mtctx->frameContentSize = pledgedSrcSize;
+    if (dict) {
+        ZSTD_freeCDict(mtctx->cdictLocal);
+        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+                                                    ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
+                                                    params.cParams, mtctx->cMem);
+        mtctx->cdict = mtctx->cdictLocal;
+        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
+    } else {
+        ZSTD_freeCDict(mtctx->cdictLocal);
+        mtctx->cdictLocal = NULL;
+        mtctx->cdict = cdict;
+    }
+
+    mtctx->targetPrefixSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
+    DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(mtctx->targetPrefixSize>>10));
+    mtctx->targetSectionSize = params.jobSize;
+    if (mtctx->targetSectionSize < ZSTDMT_JOBSIZE_MIN) mtctx->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
+    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
+    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), params.jobSize);
+    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
+    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
+    {
+        /* If ldm is enabled we need windowSize space. */
+        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
+        /* Two buffers of slack, plus extra space for the overlap
+         * This is the minimum slack that LDM works with. One extra because
+         * flush might waste up to targetSectionSize-1 bytes. Another extra
+         * for the overlap (if > 0), then one to fill which doesn't overlap
+         * with the LDM window.
+         */
+        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
+        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
+        /* Compute the total size, and always have enough slack */
+        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
+        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
+        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
+        if (mtctx->roundBuff.capacity < capacity) {
+            if (mtctx->roundBuff.buffer)
+                ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
+            mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
+            if (mtctx->roundBuff.buffer == NULL) {
+                mtctx->roundBuff.capacity = 0;
+                return ERROR(memory_allocation);
+            }
+            mtctx->roundBuff.capacity = capacity;
+        }
+    }
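+    /* example (illustrative values) : no LDM, 2 MB sections, 4 workers and a
+     * non-zero overlap give capacity = MAX(0, 4 * 2 MB) + 3 * 2 MB = 14 MB */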
+    DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
+    mtctx->roundBuff.pos = 0;
+    mtctx->inBuff.buffer = g_nullBuffer;
+    mtctx->inBuff.filled = 0;
+    mtctx->inBuff.prefix = kNullRange;
+    mtctx->doneJobID = 0;
+    mtctx->nextJobID = 0;
+    mtctx->frameEnded = 0;
+    mtctx->allJobsCompleted = 0;
+    mtctx->consumed = 0;
+    mtctx->produced = 0;
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+        return ERROR(memory_allocation);
+    return 0;
+}
+
+size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+                             const void* dict, size_t dictSize,
+                                   ZSTD_parameters params,
+                                   unsigned long long pledgedSrcSize)
+{
+    ZSTD_CCtx_params cctxParams = mtctx->params;  /* retrieve sticky params */
+    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
+    cctxParams.cParams = params.cParams;
+    cctxParams.fParams = params.fParams;
+    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
+                                       cctxParams, pledgedSrcSize);
+}
+
+size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+                               const ZSTD_CDict* cdict,
+                                     ZSTD_frameParameters fParams,
+                                     unsigned long long pledgedSrcSize)
+{
+    ZSTD_CCtx_params cctxParams = mtctx->params;
+    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
+    cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
+    cctxParams.fParams = fParams;
+    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
+                                       cctxParams, pledgedSrcSize);
+}
+
+
+/* ZSTDMT_resetCStream() :
+ * pledgedSrcSize can be zero == unknown (for the time being)
+ * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
+ * as `0` might mean "empty" in the future */
+size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
+{
+    if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
+    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, mtctx->params,
+                                       pledgedSrcSize);
+}
+
+size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
+    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
+    cctxParams.cParams = params.cParams;
+    cctxParams.fParams = params.fParams;
+    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+
+/* ZSTDMT_writeLastEmptyBlock()
+ * Write a single empty block with an end-of-frame to finish a frame.
+ * Job must be created from streaming variant.
+ * This function is always successful if expected conditions are fulfilled.
+ */
+static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
+{
+    assert(job->lastJob == 1);
+    assert(job->src.size == 0);   /* last job is empty -> will be simplified into a last empty block */
+    assert(job->firstJob == 0);   /* cannot be first job, as it also needs to create frame header */
+    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
+    job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
+    if (job->dstBuff.start == NULL) {
+      job->cSize = ERROR(memory_allocation);
+      return;
+    }
+    assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */
+    job->src = kNullRange;
+    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
+    assert(!ZSTD_isError(job->cSize));
+    assert(job->consumed == 0);
+}
+
+static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
+{
+    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
+    int const endFrame = (endOp == ZSTD_e_end);
+
+    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
+        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
+        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
+        return 0;
+    }
+
+    if (!mtctx->jobReady) {
+        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
+        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
+                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
+        mtctx->jobs[jobID].src.start = src;
+        mtctx->jobs[jobID].src.size = srcSize;
+        assert(mtctx->inBuff.filled >= srcSize);
+        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
+        mtctx->jobs[jobID].consumed = 0;
+        mtctx->jobs[jobID].cSize = 0;
+        mtctx->jobs[jobID].params = mtctx->params;
+        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
+        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
+        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
+        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
+        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
+        mtctx->jobs[jobID].serial = &mtctx->serial;
+        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
+        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
+        mtctx->jobs[jobID].lastJob = endFrame;
+        mtctx->jobs[jobID].frameChecksumNeeded = endFrame && (mtctx->nextJobID>0) && mtctx->params.fParams.checksumFlag;
+        mtctx->jobs[jobID].dstFlushed = 0;
+
+        /* Update the round buffer pos and clear the input buffer to be reset */
+        mtctx->roundBuff.pos += srcSize;
+        mtctx->inBuff.buffer = g_nullBuffer;
+        mtctx->inBuff.filled = 0;
+        /* Set the prefix */
+        if (!endFrame) {
+            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
+            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
+            mtctx->inBuff.prefix.size = newPrefixSize;
+        } else {   /* endFrame==1 => no need for another input buffer */
+            mtctx->inBuff.prefix = kNullRange;
+            mtctx->frameEnded = endFrame;
+            if (mtctx->nextJobID == 0) {
+                /* single job exception : checksum is already calculated directly within worker thread */
+                mtctx->params.fParams.checksumFlag = 0;
+        }   }
+
+        if ( (srcSize == 0)
+          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
+            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
+            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */
+            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
+            mtctx->nextJobID++;
+            return 0;
+        }
+    }
+
+    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
+                mtctx->nextJobID,
+                (U32)mtctx->jobs[jobID].src.size,
+                mtctx->jobs[jobID].lastJob,
+                mtctx->nextJobID,
+                jobID);
+    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
+        mtctx->nextJobID++;
+        mtctx->jobReady = 0;
+    } else {
+        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
+        mtctx->jobReady = 1;
+    }
+    return 0;
+}
+
+
+/*! ZSTDMT_flushProduced() :
+ * `output` : `pos` will be updated with amount of data flushed.
+ * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
+ * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
+static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
+{
+    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
+    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
+                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
+    assert(output->size >= output->pos);
+
+    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+    if (  blockToFlush
+      && (mtctx->doneJobID < mtctx->nextJobID) ) {
+        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
+        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
+            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
+                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
+                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
+                break;
+            }
+            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
+                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */
+    }   }
+
+    /* try to flush something */
+    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
+        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
+        size_t const srcSize = mtctx->jobs[wJobID].src.size;        /* read-only : could be read outside the mutex, but declared here for C90 no-declaration-after-statement */
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+        if (ZSTD_isError(cSize)) {
+            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
+                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
+            ZSTDMT_waitForAllJobsCompleted(mtctx);
+            ZSTDMT_releaseAllJobResources(mtctx);
+            return cSize;
+        }
+        /* add frame checksum if necessary (can only happen once) */
+        assert(srcConsumed <= srcSize);
+        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
+          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
+            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
+            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
+            cSize += 4;
+            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
+            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
+        }
+        if (cSize > 0) {   /* compression is ongoing or completed */
+            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
+            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
+                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
+            assert(mtctx->doneJobID < mtctx->nextJobID);
+            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
+            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
+            memcpy((char*)output->dst + output->pos,
+                   (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
+                   toFlush);
+            output->pos += toFlush;
+            mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */
+
+            if ( (srcConsumed == srcSize)    /* job completed */
+              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
+                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
+                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
+                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
+                mtctx->consumed += srcSize;
+                mtctx->produced += cSize;
+                mtctx->doneJobID++;
+        }   }
+
+        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
+        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
+        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
+    }
+    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
+    if (mtctx->jobReady) return 1;      /* one job is ready to push, just not yet in the list */
+    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
+    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
+    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
+    return 0;   /* internal buffers fully flushed */
+}
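+/* Worked example of the return convention above (illustrative only, not part
+ * of upstream zstd) : if the write job has produced cSize == 100 bytes and 40
+ * of them are already flushed, the call returns 60 ; if the job is still
+ * compressing and its final size is unknown, it returns 1 ; once every job is
+ * flushed (and, for ZSTD_e_end, the frame is ended), it returns 0. */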
+
+/**
+ * Returns the range of data used by the earliest job that is not yet complete.
+ * If the data of the first job is broken up into two segments, we cover both
+ * sections.
+ */
+static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
+{
+    unsigned const firstJobID = mtctx->doneJobID;
+    unsigned const lastJobID = mtctx->nextJobID;
+    unsigned jobID;
+
+    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
+        unsigned const wJobID = jobID & mtctx->jobIDMask;
+        size_t consumed;
+
+        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+        consumed = mtctx->jobs[wJobID].consumed;
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+
+        if (consumed < mtctx->jobs[wJobID].src.size) {
+            range_t range = mtctx->jobs[wJobID].prefix;
+            if (range.size == 0) {
+                /* Empty prefix */
+                range = mtctx->jobs[wJobID].src;
+            }
+            /* Job source in multiple segments not supported yet */
+            assert(range.start <= mtctx->jobs[wJobID].src.start);
+            return range;
+        }
+    }
+    return kNullRange;
+}
+
+/**
+ * Returns non-zero iff buffer and range overlap.
+ */
+static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
+{
+    BYTE const* const bufferStart = (BYTE const*)buffer.start;
+    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
+    BYTE const* const rangeStart = (BYTE const*)range.start;
+    BYTE const* const rangeEnd = rangeStart + range.size;
+
+    if (rangeStart == NULL || bufferStart == NULL)
+        return 0;
+    /* Empty ranges cannot overlap */
+    if (bufferStart == bufferEnd || rangeStart == rangeEnd)
+        return 0;
+
+    return bufferStart < rangeEnd && rangeStart < bufferEnd;
+}
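+/* Example (illustrative only) : a buffer covering [100, 200) and a range
+ * covering [150, 250) overlap, since 100 < 250 && 150 < 200 ; the adjacent
+ * pair [100, 200) and [200, 300) does not, because the intervals are
+ * half-open. */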
+
+static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
+{
+    range_t extDict;
+    range_t prefix;
+
+    extDict.start = window.dictBase + window.lowLimit;
+    extDict.size = window.dictLimit - window.lowLimit;
+
+    prefix.start = window.base + window.dictLimit;
+    prefix.size = window.nextSrc - (window.base + window.dictLimit);
+    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
+                (size_t)extDict.start,
+                (size_t)extDict.start + extDict.size);
+    DEBUGLOG(5, "prefix  [0x%zx, 0x%zx)",
+                (size_t)prefix.start,
+                (size_t)prefix.start + prefix.size);
+
+    return ZSTDMT_isOverlapped(buffer, extDict)
+        || ZSTDMT_isOverlapped(buffer, prefix);
+}
+
+static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
+{
+    if (mtctx->params.ldmParams.enableLdm) {
+        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+        DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
+                    (size_t)buffer.start,
+                    (size_t)buffer.start + buffer.capacity);
+        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
+        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
+            DEBUGLOG(6, "Waiting for LDM to finish...");
+            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
+        }
+        DEBUGLOG(6, "Done waiting for LDM to finish");
+        ZSTD_pthread_mutex_unlock(mutex);
+    }
+}
+
+/**
+ * Attempts to set the inBuff to the next section to fill.
+ * If any part of the new section is still in use we give up.
+ * Returns non-zero if the buffer is filled.
+ */
+static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
+{
+    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
+    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
+    size_t const target = mtctx->targetSectionSize;
+    buffer_t buffer;
+
+    assert(mtctx->inBuff.buffer.start == NULL);
+    assert(mtctx->roundBuff.capacity >= target);
+
+    if (spaceLeft < target) {
+        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
+         * Simply copy the prefix to the beginning in that case.
+         */
+        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
+        size_t const prefixSize = mtctx->inBuff.prefix.size;
+
+        buffer.start = start;
+        buffer.capacity = prefixSize;
+        if (ZSTDMT_isOverlapped(buffer, inUse)) {
+            DEBUGLOG(6, "Waiting for buffer...");
+            return 0;
+        }
+        ZSTDMT_waitForLdmComplete(mtctx, buffer);
+        memmove(start, mtctx->inBuff.prefix.start, prefixSize);
+        mtctx->inBuff.prefix.start = start;
+        mtctx->roundBuff.pos = prefixSize;
+    }
+    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
+    buffer.capacity = target;
+
+    if (ZSTDMT_isOverlapped(buffer, inUse)) {
+        DEBUGLOG(6, "Waiting for buffer...");
+        return 0;
+    }
+    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
+
+    ZSTDMT_waitForLdmComplete(mtctx, buffer);
+
+    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
+                (size_t)mtctx->inBuff.prefix.start,
+                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
+    DEBUGLOG(5, "Using source range [%zx, %zx)",
+                (size_t)buffer.start,
+                (size_t)buffer.start + buffer.capacity);
+
+
+    mtctx->inBuff.buffer = buffer;
+    mtctx->inBuff.filled = 0;
+    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
+    return 1;
+}
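+/* Illustrative round-buffer walk-through (numbers are hypothetical) : with
+ * roundBuff.capacity == 8 MB, targetSectionSize == 2 MB and roundBuff.pos ==
+ * 7 MB, spaceLeft (1 MB) < target, so the current prefix is first copied
+ * back to offset 0 ; the next 2 MB section is then carved from the start of
+ * the buffer, provided no in-flight job still reads either region. */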
+
+
+/** ZSTDMT_compressStream_generic() :
+ *  internal use only - exposed to be invoked from zstd_compress.c
+ *  assumption : output and input are valid (pos <= size)
+ * @return : minimum amount of data remaining to flush, 0 if none */
+size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+                                     ZSTD_outBuffer* output,
+                                     ZSTD_inBuffer* input,
+                                     ZSTD_EndDirective endOp)
+{
+    unsigned forwardInputProgress = 0;
+    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
+                (U32)endOp, (U32)(input->size - input->pos));
+    assert(output->pos <= output->size);
+    assert(input->pos  <= input->size);
+
+    if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
+        return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
+    }
+
+    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
+        /* current frame being ended. Only flush/end are allowed */
+        return ERROR(stage_wrong);
+    }
+
+    /* single-pass shortcut (note : synchronous-mode) */
+    if ( (mtctx->nextJobID == 0)      /* just started */
+      && (mtctx->inBuff.filled == 0)  /* nothing buffered */
+      && (!mtctx->jobReady)           /* no job already created */
+      && (endOp == ZSTD_e_end)        /* end order */
+      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */
+        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
+                (char*)output->dst + output->pos, output->size - output->pos,
+                (const char*)input->src + input->pos, input->size - input->pos,
+                mtctx->cdict, mtctx->params);
+        if (ZSTD_isError(cSize)) return cSize;
+        input->pos = input->size;
+        output->pos += cSize;
+        mtctx->allJobsCompleted = 1;
+        mtctx->frameEnded = 1;
+        return 0;
+    }
+
+    /* fill input buffer */
+    if ( (!mtctx->jobReady)
+      && (input->size > input->pos) ) {   /* support NULL input */
+        if (mtctx->inBuff.buffer.start == NULL) {
+            assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
+            if (!ZSTDMT_tryGetInputRange(mtctx)) {
+                /* It is only possible for this operation to fail if there are
+                 * still compression jobs ongoing.
+                 */
+                assert(mtctx->doneJobID != mtctx->nextJobID);
+            }
+        }
+        if (mtctx->inBuff.buffer.start != NULL) {
+            size_t const toLoad = MIN(input->size - input->pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
+            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
+                        (U32)toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
+            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
+            input->pos += toLoad;
+            mtctx->inBuff.filled += toLoad;
+            forwardInputProgress = toLoad>0;
+        }
+        if ((input->pos < input->size) && (endOp == ZSTD_e_end))
+            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
+    }
+
+    if ( (mtctx->jobReady)
+      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
+      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
+      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
+        size_t const jobSize = mtctx->inBuff.filled;
+        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
+        CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
+    }
+
+    /* check for potential compressed data ready to be flushed */
+    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */
+        if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not consumed : do not end flush yet */
+        return remainingToFlush;
+    }
+}
+
+
+size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+{
+    CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
+
+    /* recommended next input size : fill current input buffer */
+    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when the input buffer is full and no new job can be created yet */
+}
+
+
+static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
+{
+    size_t const srcSize = mtctx->inBuff.filled;
+    DEBUGLOG(5, "ZSTDMT_flushStream_internal");
+
+    if ( mtctx->jobReady     /* one job ready for a worker to pick up */
+      || (srcSize > 0)       /* still some data within input buffer */
+      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
+           DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
+                        (U32)srcSize, (U32)endFrame);
+        CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
+    }
+
+    /* check if there is any data available to flush */
+    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
+}
+
+
+size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
+{
+    DEBUGLOG(5, "ZSTDMT_flushStream");
+    if (mtctx->singleBlockingThread)
+        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
+    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
+}
+
+size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
+{
+    DEBUGLOG(4, "ZSTDMT_endStream");
+    if (mtctx->singleBlockingThread)
+        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
+    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
+}
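+
+/* A minimal end-to-end caller sketch for the streaming API defined in this
+ * file (illustrative only, not part of upstream zstd). `src`, `srcSize`,
+ * `dst` and `dstCapacity` are assumed caller-provided buffers, and
+ * `dstCapacity` is assumed large enough that the final flush loop terminates :
+ *
+ *   ZSTDMT_CCtx* mtctx = ZSTDMT_createCCtx(4);            // 4 worker threads
+ *   size_t r = ZSTDMT_initCStream(mtctx, 3);              // compression level 3
+ *   ZSTD_inBuffer  in  = { src, srcSize, 0 };
+ *   ZSTD_outBuffer out = { dst, dstCapacity, 0 };
+ *   while (!ZSTD_isError(r) && in.pos < in.size)
+ *       r = ZSTDMT_compressStream(mtctx, &out, &in);      // feed input
+ *   if (!ZSTD_isError(r)) {
+ *       size_t remaining;
+ *       do { remaining = ZSTDMT_endStream(mtctx, &out); } // 0 == fully flushed
+ *       while (!ZSTD_isError(remaining) && remaining > 0);
+ *   }
+ *   ZSTDMT_freeCCtx(mtctx);
+ */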
diff --git a/vendor/github.com/DataDog/zstd/zstdmt_compress.h b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
new file mode 100755
index 0000000..f79e3b4
--- /dev/null
+++ b/vendor/github.com/DataDog/zstd/zstdmt_compress.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTDMT_COMPRESS_H
+#define ZSTDMT_COMPRESS_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* Note : This is an internal API.
+ *        Some methods are still exposed (ZSTDLIB_API),
+ *        because this used to be the only way to invoke MT compression.
+ *        Now, it's recommended to use ZSTD_compress_generic() instead.
+ *        These methods will stop being exposed in a future version */
+
+/* ===   Dependencies   === */
+#include <stddef.h>                /* size_t */
+#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_parameters */
+#include "zstd.h"            /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */
+
+
+/* ===   Memory management   === */
+typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
+ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
+ZSTDLIB_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
+                                                    ZSTD_customMem cMem);
+ZSTDLIB_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
+
+ZSTDLIB_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
+
+
+/* ===   Simple one-pass compression function   === */
+
+ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+                                       void* dst, size_t dstCapacity,
+                                 const void* src, size_t srcSize,
+                                       int compressionLevel);
+
+
+
+/* ===   Streaming functions   === */
+
+ZSTDLIB_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDLIB_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize);  /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
+
+ZSTDLIB_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
+
+ZSTDLIB_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);   /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output);     /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+
+
+/* ===   Advanced functions and parameters  === */
+
+#ifndef ZSTDMT_JOBSIZE_MIN
+#  define ZSTDMT_JOBSIZE_MIN (1U << 20)   /* 1 MB - Minimum size of each compression job */
+#endif
+
+ZSTDLIB_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                                           void* dst, size_t dstCapacity,
+                                     const void* src, size_t srcSize,
+                                     const ZSTD_CDict* cdict,
+                                           ZSTD_parameters params,
+                                           unsigned overlapLog);
+
+ZSTDLIB_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
+                                        const void* dict, size_t dictSize,   /* dict can be released after init, a local copy is preserved within zcs */
+                                        ZSTD_parameters params,
+                                        unsigned long long pledgedSrcSize);  /* pledgedSrcSize is optional and can be zero == unknown */
+
+ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
+                                        const ZSTD_CDict* cdict,
+                                        ZSTD_frameParameters fparams,
+                                        unsigned long long pledgedSrcSize);  /* note : zero means empty */
+
+/* ZSTDMT_parameter :
+ * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
+typedef enum {
+    ZSTDMT_p_jobSize,           /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
+    ZSTDMT_p_overlapSectionLog  /* Each job may reload a part of the previous job to enhance compression ratio; 0 == no overlap, 6 (default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter: its value will be re-used on next compression job */
+} ZSTDMT_parameter;
+
+/* ZSTDMT_setMTCtxParameter() :
+ * Allows setting individual parameters, one at a time, from the list of enums defined in ZSTDMT_parameter.
+ * The function should typically be called after ZSTD_createCCtx() but __before ZSTDMT_init*()!__
+ * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
+ * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
+ZSTDLIB_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, unsigned value);
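+
+/* Example (illustrative sketch, not part of upstream) :
+ *   ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_jobSize, 1U << 22);      // 4 MB jobs
+ *   ZSTDMT_setMTCtxParameter(mtctx, ZSTDMT_p_overlapSectionLog, 6);   // 1/8th of window
+ * Both calls must happen before ZSTDMT_init*(), as noted above. */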
+
+
+/*! ZSTDMT_compressStream_generic() :
+ *  Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream()
+ *  depending on flush directive.
+ * @return : minimum amount of data still to be flushed
+ *           0 if fully flushed
+ *           or an error code
+ *  note : needs to be initialized using any ZSTD_initCStream*() variant */
+ZSTDLIB_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
+                                                ZSTD_outBuffer* output,
+                                                ZSTD_inBuffer* input,
+                                                ZSTD_EndDirective endOp);
+
+
+/* ========================================================
+ * ===  Private interface, for use by ZSTD_compress.c   ===
+ * ===  Not exposed in libzstd. Never invoke directly   ===
+ * ======================================================== */
+
+size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, unsigned value);
+
+/* ZSTDMT_CCtxParam_setNbWorkers()
+ * Set nbWorkers, and clamp it.
+ * Also reset jobSize and overlapLog */
+size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers);
+
+/*! ZSTDMT_updateCParams_whileCompressing() :
+ *  Updates only a selected set of compression parameters, to remain compatible with current frame.
+ *  New parameters will be applied to next compression job. */
+void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams);
+
+/* ZSTDMT_getNbWorkers():
+ * @return nb threads currently active in mtctx.
+ * mtctx must be valid */
+unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx);
+
+/* ZSTDMT_getFrameProgression():
+ * Tells how much data has been consumed (input) and produced (output) for the current frame.
+ * Able to count progression inside worker threads.
+ */
+ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx);
+
+
+/*! ZSTDMT_initCStream_internal() :
+ *  Private use only. Init streaming operation.
+ *  expects params to be valid.
+ *  must receive dict, or cdict, or none, but not both.
+ *  @return : 0, or an error code */
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+                    const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+                    const ZSTD_CDict* cdict,
+                    ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif   /* ZSTDMT_COMPRESS_H */
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 0000000..6e362e4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,27 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+coverage.txt
+profile.out
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 0000000..eb54a0d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,35 @@
+language: go
+go:
+- 1.10.x
+- 1.11.x
+
+env:
+  global:
+  - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+  - TOXIPROXY_ADDR=http://localhost:8474
+  - KAFKA_INSTALL_ROOT=/home/travis/kafka
+  - KAFKA_HOSTNAME=localhost
+  - DEBUG=true
+  matrix:
+  - KAFKA_VERSION=1.1.1
+  - KAFKA_VERSION=2.0.1
+  - KAFKA_VERSION=2.1.0
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+
+install: make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- if [[ "$TRAVIS_GO_VERSION" == 1.11* ]]; then make fmt; fi
+
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+
+after_script: vagrant/halt_cluster.sh
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 0000000..afc8511
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,647 @@
+# Changelog
+
+#### Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/Shopify/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/Shopify/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/Shopify/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/Shopify/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/Shopify/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/Shopify/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/Shopify/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/Shopify/sarama/pull/1141)).
+
+#### Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/Shopify/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/Shopify/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/Shopify/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/Shopify/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/Shopify/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/Shopify/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/Shopify/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/Shopify/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/Shopify/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/Shopify/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/Shopify/sarama/pull/1228)).
+
+#### Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/Shopify/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/Shopify/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
+
+#### Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/Shopify/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/Shopify/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/Shopify/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/Shopify/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/Shopify/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/Shopify/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/Shopify/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/Shopify/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/Shopify/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/Shopify/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/Shopify/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/Shopify/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/Shopify/sarama/pull/1125)).
+
+#### Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/Shopify/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/Shopify/sarama/pull/1047),
+    [#1069](https://github.com/Shopify/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/Shopify/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/Shopify/sarama/pull/1065),
+    [#1096](https://github.com/Shopify/sarama/pull/1096),
+    [#1027](https://github.com/Shopify/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/Shopify/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/Shopify/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/Shopify/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/Shopify/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/Shopify/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/Shopify/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/Shopify/sarama/pull/1050),
+    [#1051](https://github.com/Shopify/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/Shopify/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/Shopify/sarama/pull/1092)).
+
+#### Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/Shopify/sarama/pull/1007),
+    [#1008](https://github.com/Shopify/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/Shopify/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/Shopify/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/Shopify/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/Shopify/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/Shopify/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/Shopify/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+   recompression pass
+   ([#1002](https://github.com/Shopify/sarama/pull/1002),
+    [#1015](https://github.com/Shopify/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+   ([#1032](https://github.com/Shopify/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+   ([#1005](https://github.com/Shopify/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+   ([#1021](https://github.com/Shopify/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+   ([#1022](https://github.com/Shopify/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+   truncated before the magic value indicating its version
+   ([#1030](https://github.com/Shopify/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+   ([#1035](https://github.com/Shopify/sarama/pull/1035)).
+
+#### Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+   ([#984](https://github.com/Shopify/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+   ([#989](https://github.com/Shopify/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+   ([#985](https://github.com/Shopify/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+   ([#986](https://github.com/Shopify/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+   ([#982](https://github.com/Shopify/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+   ([#990](https://github.com/Shopify/sarama/pull/990)).
+ - Fix producing with multiple headers
+   ([#996](https://github.com/Shopify/sarama/pull/996)).
+ - Fix handling of truncated record batches
+   ([#998](https://github.com/Shopify/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+   ([#991](https://github.com/Shopify/sarama/pull/991)).
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+   protocol and the necessary behavioural changes in the producer and consumer.
+   Transactions and idempotency are not yet supported, but producing and
+   consuming should work with all the existing bells and whistles (batching,
+   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+   of Arista Networks for this work. Part of
+   ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+   ([#970](https://github.com/Shopify/sarama/pull/970)).
+ - Return partial replicas list when we have it
+   ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+   ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+   ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+   extremely large clusters
+   ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+   ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+   ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+   by the broker, which can be meaningful
+   ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+   variance in the actual timeout
+   ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+   ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+   ([#940](https://github.com/Shopify/sarama/pull/940)).
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+   version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+   that you still need to specify the Kafka version in the Sarama configuration
+   for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+   active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+   in-sync broker IDs for the given partition, now that the Kafka versions for
+   which this was misleading are no longer in our supported set
+   ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+   partitioner with a custom hash method in case the default (FNV-1a) is not
+   suitable
+   ([#837](https://github.com/Shopify/sarama/pull/837),
+    [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+   ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+   correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+   transparent if you're using the `AddGroupProtocol` or
+   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+   ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+   ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+   ([#701](https://github.com/Shopify/sarama/pull/701),
+    [#746](https://github.com/Shopify/sarama/pull/746),
+    [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+   ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+   ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+   `HighWaterMarkOffset` values of its child topic/partitions
+   ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+   ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+   ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+   ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+   ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+   ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+   ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+   ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+   in v1.10.0. Go's timers are finicky
+   ([#730](https://github.com/Shopify/sarama/pull/730),
+    [#733](https://github.com/Shopify/sarama/pull/733),
+    [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+   ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
+
+New Features:
+ - Support for Kafka 0.10
+   ([#672](https://github.com/Shopify/sarama/pull/672),
+    [#678](https://github.com/Shopify/sarama/pull/678),
+    [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+   ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+   ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+   ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+   ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+   misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+   ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+   ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+   ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+   ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+   ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+   ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+   ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+   ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+   implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+   ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+   ([#648](https://github.com/Shopify/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+   ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+   ([#605](https://github.com/Shopify/sarama/pull/605),
+    [#621](https://github.com/Shopify/sarama/pull/621),
+    [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+   ([#586](https://github.com/Shopify/sarama/pull/586),
+   [#588](https://github.com/Shopify/sarama/pull/588),
+   [#590](https://github.com/Shopify/sarama/pull/590)).
+   - Verified that TLS support works
+   ([#581](https://github.com/Shopify/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+   ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we did not do in order to preserve
+     API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/Shopify/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/Shopify/sarama/pull/549),
+   [#550](https://github.com/Shopify/sarama/pull/550),
+   [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+   removing a major hotspot from most profiles
+   ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+   by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+   [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+   ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+   ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+   to change when Kafka releases built-in TLS support, but for now this is
+   enough to work with TLS-terminating proxies
+   ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+   all other partitions will continue to consume normally
+   ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+   ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+   future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+   ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+   circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+   the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go, it has moved again and the API has
+   changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go, it has moved from google code to github
+   ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+   ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+   ([#450](https://github.com/Shopify/sarama/pull/450),
+   [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+   users to dynamically choose what topics/partitions to consume without
+   instantiating a full client
+   ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+   by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+   ([#439](https://github.com/Shopify/sarama/pull/439),
+   [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+   useful, and slightly less verbose
+   ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+   thundering herd on the first broker in the list
+   ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+   shutting down, fixing several instances of confusing behaviour and at least
+   one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+   making it much more resilient to specific user code ordering
+   ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+   ConsumerMetadataRequests similar to how it tracks partition leadership using
+   regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+   This adds two methods to the client API:
+   - `Coordinator(consumerGroup string) (*Broker, error)`
+   - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+   ID/address/port combination for the Coordinator; accessing the fields
+   individually has been deprecated
+   ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+   Consumers will fail to start if the provided offset is out of range
+   ([#418](https://github.com/Shopify/sarama/pull/418))
+   and they will automatically shut down if the offset falls out of range
+   ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+   ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+   it happens to be activated while the client is being closed
+   ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+   ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+   messages due to an improved queue implementation
+   ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+   changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+   retry once in the event of stale information or similar
+   ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+   ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+   API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+   [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+   broken topics don't choke throughput
+   ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+   ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+   ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 0000000..d2bf435
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Shopify
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 0000000..8fcf219
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,30 @@
+default: fmt vet errcheck test
+
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
+test:
+	echo "" > coverage.txt
+	for d in `go list ./... | grep -v vendor`; do \
+		go test -p 1 -v -timeout 240s -race -coverprofile=profile.out -covermode=atomic $$d || exit 1; \
+		if [ -f profile.out ]; then \
+			cat profile.out >> coverage.txt; \
+			rm profile.out; \
+		fi \
+	done
+
+vet:
+	go vet ./...
+
+# See https://github.com/kisielk/errcheck/pull/141 for details on ignorepkg
+errcheck:
+	errcheck -ignorepkg fmt github.com/Shopify/sarama/...
+
+fmt:
+	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+install_dependencies: install_errcheck get
+
+install_errcheck:
+	go get github.com/kisielk/errcheck
+
+get:
+	go get -t
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 0000000..f241b89
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,39 @@
+sarama
+======
+
+[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.svg)](https://godoc.org/github.com/Shopify/sarama)
+[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
+[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+### Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
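+
+As a quick orientation, here is a hedged sketch of the non-blocking
+`AsyncProducer` (the broker address and topic are assumptions for the example;
+see the godoc for complete, authoritative examples):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/Shopify/sarama"
+)
+
+func main() {
+	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, sarama.NewConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer producer.Close() // drains any buffered messages and errors
+
+	msg := &sarama.ProducerMessage{Topic: "example", Value: sarama.StringEncoder("hello")}
+
+	// Send and receive errors in a single select so Errors() is always drained,
+	// as the AsyncProducer documentation suggests.
+	select {
+	case producer.Input() <- msg:
+	case err := <-producer.Errors():
+		log.Println("failed to produce:", err)
+	}
+}
+```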
+
+### Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two-month
+grace period for older releases. This means we currently officially support
+Go 1.8 through 1.11, and Kafka 1.0 through 2.0, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
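+
+In practice the stable import is a single line (shown here as a sketch):
+
+```go
+import "gopkg.in/Shopify/sarama.v1"
+```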
+
+### Contributing
+
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
+  technical and design details.
+* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
+  contains a wealth of useful information.
+* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+* If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 0000000..f4b848a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  config.vm.box = "ubuntu/trusty64"
+
+  config.vm.provision :shell, path: "vagrant/provision.sh"
+
+  config.vm.network "private_network", ip: "192.168.100.67"
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = MEMORY
+  end
+end
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/Shopify/sarama/acl_bindings.go
new file mode 100644
index 0000000..5151735
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_bindings.go
@@ -0,0 +1,119 @@
+package sarama
+
+type Resource struct {
+	ResourceType AclResourceType
+	ResourceName string
+}
+
+func (r *Resource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(r.ResourceType))
+
+	if err := pe.putString(r.ResourceName); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Resource) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.ResourceType = AclResourceType(resourceType)
+
+	if r.ResourceName, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type Acl struct {
+	Principal      string
+	Host           string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *Acl) encode(pe packetEncoder) error {
+	if err := pe.putString(a.Principal); err != nil {
+		return err
+	}
+
+	if err := pe.putString(a.Host); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *Acl) decode(pd packetDecoder, version int16) (err error) {
+	if a.Principal, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
+
+type ResourceAcls struct {
+	Resource
+	Acls []*Acl
+}
+
+func (r *ResourceAcls) encode(pe packetEncoder) error {
+	if err := r.Resource.encode(pe); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Acls)); err != nil {
+		return err
+	}
+	for _, acl := range r.Acls {
+		if err := acl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ResourceAcls) decode(pd packetDecoder, version int16) error {
+	if err := r.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Acls = make([]*Acl, n)
+	for i := 0; i < n; i++ {
+		r.Acls[i] = new(Acl)
+		if err := r.Acls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
new file mode 100644
index 0000000..0b6ecbe
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_request.go
@@ -0,0 +1,76 @@
+package sarama
+
+type CreateAclsRequest struct {
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *CreateAclsRequest) key() int16 {
+	return 30
+}
+
+func (d *CreateAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *CreateAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder) error {
+	if err := a.Resource.encode(pe); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
new file mode 100644
index 0000000..8a56f35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_create_response.go
@@ -0,0 +1,88 @@
+package sarama
+
+import "time"
+
+type CreateAclsResponse struct {
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *CreateAclsResponse) key() int16 {
+	return 30
+}
+
+func (d *CreateAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *CreateAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type AclCreationResponse struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(a.Err))
+
+	if err := pe.putNullableString(a.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	if a.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
new file mode 100644
index 0000000..4133dce
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+type DeleteAclsRequest struct {
+	Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Filters)); err != nil {
+		return err
+	}
+
+	for _, filter := range d.Filters {
+		if err := filter.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.Filters = make([]*AclFilter, n)
+	for i := 0; i < n; i++ {
+		d.Filters[i] = new(AclFilter)
+		if err := d.Filters[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
new file mode 100644
index 0000000..b5e1c45
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_delete_response.go
@@ -0,0 +1,155 @@
+package sarama
+
+import "time"
+
+type DeleteAclsResponse struct {
+	ThrottleTime    time.Duration
+	FilterResponses []*FilterResponse
+}
+
+func (a *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(a.FilterResponses)); err != nil {
+		return err
+	}
+
+	for _, filterResponse := range a.FilterResponses {
+		if err := filterResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	a.FilterResponses = make([]*FilterResponse, n)
+
+	for i := 0; i < n; i++ {
+		a.FilterResponses[i] = new(FilterResponse)
+		if err := a.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return 31
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(f.Err))
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(kerr)
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder) error {
+	pe.putInt16(int16(m.Err))
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	m.Err = KError(kerr)
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
new file mode 100644
index 0000000..02a5a1f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_request.go
@@ -0,0 +1,25 @@
+package sarama
+
+type DescribeAclsRequest struct {
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return 0
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
new file mode 100644
index 0000000..5bc9497
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_describe_response.go
@@ -0,0 +1,80 @@
+package sarama
+
+import "time"
+
+type DescribeAclsResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(d.Err))
+
+	if err := pe.putNullableString(d.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+		return err
+	}
+
+	for _, resourceAcl := range d.ResourceAcls {
+		if err := resourceAcl.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	d.Err = KError(kerr)
+
+	if d.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.ResourceAcls = make([]*ResourceAcls, n)
+
+	for i := 0; i < n; i++ {
+		d.ResourceAcls[i] = new(ResourceAcls)
+		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+	return 29
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+	return 0
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/Shopify/sarama/acl_filter.go
new file mode 100644
index 0000000..9706354
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_filter.go
@@ -0,0 +1,61 @@
+package sarama
+
+type AclFilter struct {
+	ResourceType   AclResourceType
+	ResourceName   *string
+	Principal      *string
+	Host           *string
+	Operation      AclOperation
+	PermissionType AclPermissionType
+}
+
+func (a *AclFilter) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.ResourceType))
+	if err := pe.putNullableString(a.ResourceName); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Principal); err != nil {
+		return err
+	}
+	if err := pe.putNullableString(a.Host); err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Operation))
+	pe.putInt8(int8(a.PermissionType))
+
+	return nil
+}
+
+func (a *AclFilter) decode(pd packetDecoder, version int16) (err error) {
+	resourceType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.ResourceType = AclResourceType(resourceType)
+
+	if a.ResourceName, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Principal, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if a.Host, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	operation, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = AclOperation(operation)
+
+	permissionType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.PermissionType = AclPermissionType(permissionType)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
new file mode 100644
index 0000000..19da6f2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/acl_types.go
@@ -0,0 +1,42 @@
+package sarama
+
+type AclOperation int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown         AclOperation = 0
+	AclOperationAny             AclOperation = 1
+	AclOperationAll             AclOperation = 2
+	AclOperationRead            AclOperation = 3
+	AclOperationWrite           AclOperation = 4
+	AclOperationCreate          AclOperation = 5
+	AclOperationDelete          AclOperation = 6
+	AclOperationAlter           AclOperation = 7
+	AclOperationDescribe        AclOperation = 8
+	AclOperationClusterAction   AclOperation = 9
+	AclOperationDescribeConfigs AclOperation = 10
+	AclOperationAlterConfigs    AclOperation = 11
+	AclOperationIdempotentWrite AclOperation = 12
+)
+
+type AclPermissionType int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = 0
+	AclPermissionAny     AclPermissionType = 1
+	AclPermissionDeny    AclPermissionType = 2
+	AclPermissionAllow   AclPermissionType = 3
+)
+
+type AclResourceType int
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown         AclResourceType = 0
+	AclResourceAny             AclResourceType = 1
+	AclResourceTopic           AclResourceType = 2
+	AclResourceGroup           AclResourceType = 3
+	AclResourceCluster         AclResourceType = 4
+	AclResourceTransactionalID AclResourceType = 5
+)
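+
+// As a hedged illustration (the principal, host, and topic below are
+// hypothetical), these enums combine with the Resource and Acl types defined
+// in acl_bindings.go to express a rule such as "allow User:alice to read
+// topic my-topic from any host":
+//
+//	resource := Resource{ResourceType: AclResourceTopic, ResourceName: "my-topic"}
+//	acl := Acl{
+//		Principal:      "User:alice",
+//		Host:           "*",
+//		Operation:      AclOperationRead,
+//		PermissionType: AclPermissionAllow,
+//	}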
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 0000000..6da166c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type AddOffsetsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 0000000..3a46151
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AddOffsetsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(a.Err))
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.Err = KError(kerr)
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return 25
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 0000000..a8a5922
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,76 @@
+package sarama
+
+type AddPartitionsToTxnRequest struct {
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 0000000..581c556
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,108 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AddPartitionsToTxnResponse struct {
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return 24
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt16(int16(p.Err))
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	p.Err = KError(kerr)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
new file mode 100644
index 0000000..5272575
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/admin.go
@@ -0,0 +1,382 @@
+package sarama
+
+import "errors"
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks.
+type ClusterAdmin interface {
+	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+	// It may take several seconds after CreateTopic returns success for all the brokers
+	// to become aware that the topic has been created. During this time, listTopics
+	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+	// Delete a topic. It may take several seconds after DeleteTopic returns success
+	// for all the brokers to become aware that the topic is gone.
+	// During this time, listTopics may continue to return information about the deleted topic.
+	// If delete.topic.enable is false on the brokers, deleteTopic will mark
+	// the topic for deletion, but not actually delete it.
+	// This operation is supported by brokers with version 0.10.1.0 or higher.
+	DeleteTopic(topic string) error
+
+	// Increase the number of partitions of the topic to the given count.
+	// If partitions are increased for a topic that has a key, the partition logic or ordering of
+	// the messages will be affected. It may take several seconds after this method returns
+	// success for all the brokers to become aware that the partitions have been created.
+	// During this time, ClusterAdmin#describeTopics may not return information about the
+	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+	// Delete records whose offset is smaller than the given offset of the corresponding partition.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+	// Get the configuration for the specified resources.
+	// The returned configuration includes default values; the Default field can be
+	// used to distinguish them from user-supplied values.
+	// Config entries where ReadOnly is true cannot be updated.
+	// The value of config entries where Sensitive is true is always nil so
+	// sensitive information is not disclosed.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+	// Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	// Currently, topic is the only resource type whose configs can be updated.
+	// Updates are not transactional, so they may succeed for some resources
+	// while failing for others. The configs for a particular resource are updated atomically.
+	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+	// Creates access control lists (ACLs) which are bound to specific resources.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	CreateACL(resource Resource, acl Acl) error
+
+	// Lists access control lists (ACLs) according to the supplied filter.
+	// It may take some time for changes made by CreateACL or DeleteACL to be reflected in the output of ListAcls.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+	// Deletes access control lists (ACLs) according to the supplied filters.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+	// Close shuts down the admin and closes underlying client.
+	Close() error
+}
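+
+// A minimal usage sketch (hedged: the broker address, topic name, and topic
+// details are hypothetical, and error handling is elided for brevity):
+//
+//	config := NewConfig()
+//	config.Version = V1_0_0_0 // CreateTopic needs brokers at 0.10.1.0 or higher
+//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer admin.Close()
+//
+//	detail := &TopicDetail{NumPartitions: 3, ReplicationFactor: 1}
+//	err = admin.CreateTopic("example-topic", detail, false)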
+
+type clusterAdmin struct {
+	client Client
+	conf   *Config
+}
+
+// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
+func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+
+	// make sure we can retrieve the controller
+	_, err = client.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	ca := &clusterAdmin{
+		client: client,
+		conf:   client.Config(),
+	}
+	return ca, nil
+}
+
+func (ca *clusterAdmin) Close() error {
+	return ca.client.Close()
+}
+
+func (ca *clusterAdmin) Controller() (*Broker, error) {
+	return ca.client.Controller()
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	if detail == nil {
+		return errors.New("You must specify topic details")
+	}
+
+	topicDetails := make(map[string]*TopicDetail)
+	topicDetails[topic] = detail
+
+	request := &CreateTopicsRequest{
+		TopicDetails: topicDetails,
+		ValidateOnly: validateOnly,
+		Timeout:      ca.conf.Admin.Timeout,
+	}
+
+	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
+		request.Version = 2
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.CreateTopics(request)
+	if err != nil {
+		return err
+	}
+
+	topicErr, ok := rsp.TopicErrors[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if topicErr.Err != ErrNoError {
+		return topicErr.Err
+	}
+
+	return nil
+}
+
+func (ca *clusterAdmin) DeleteTopic(topic string) error {
+
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &DeleteTopicsRequest{
+		Topics:  []string{topic},
+		Timeout: ca.conf.Admin.Timeout,
+	}
+
+	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.DeleteTopics(request)
+	if err != nil {
+		return err
+	}
+
+	topicErr, ok := rsp.TopicErrorCodes[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if topicErr != ErrNoError {
+		return topicErr
+	}
+	return nil
+}
+
+func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	topicPartitions := make(map[string]*TopicPartition)
+	topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
+
+	request := &CreatePartitionsRequest{
+		TopicPartitions: topicPartitions,
+		Timeout:         ca.conf.Admin.Timeout,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.CreatePartitions(request)
+	if err != nil {
+		return err
+	}
+
+	topicErr, ok := rsp.TopicPartitionErrors[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	if topicErr.Err != ErrNoError {
+		return topicErr.Err
+	}
+
+	return nil
+}
+
+func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
+
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	topics := make(map[string]*DeleteRecordsRequestTopic)
+	topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: partitionOffsets}
+	request := &DeleteRecordsRequest{
+		Topics:  topics,
+		Timeout: ca.conf.Admin.Timeout,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.DeleteRecords(request)
+	if err != nil {
+		return err
+	}
+
+	_, ok := rsp.Topics[topic]
+	if !ok {
+		return ErrIncompleteResponse
+	}
+
+	// TODO: since we are dealing with several partitions, it would be good to
+	// return a slice of errors, one per partition, instead of a single error.
+	return nil
+}
+
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+
+	var entries []ConfigEntry
+	var resources []*ConfigResource
+	resources = append(resources, &resource)
+
+	request := &DescribeConfigsRequest{
+		Resources: resources,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeConfigs(request)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == resource.Name {
+			if rspResource.ErrorMsg != "" {
+				return nil, errors.New(rspResource.ErrorMsg)
+			}
+			for _, cfgEntry := range rspResource.Configs {
+				entries = append(entries, *cfgEntry)
+			}
+		}
+	}
+	return entries, nil
+}
+
+func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
+
+	var resources []*AlterConfigsResource
+	resources = append(resources, &AlterConfigsResource{
+		Type:          resourceType,
+		Name:          name,
+		ConfigEntries: entries,
+	})
+
+	request := &AlterConfigsRequest{
+		Resources:    resources,
+		ValidateOnly: validateOnly,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.AlterConfigs(request)
+	if err != nil {
+		return err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == name {
+			if rspResource.ErrorMsg != "" {
+				return errors.New(rspResource.ErrorMsg)
+			}
+		}
+	}
+	return nil
+}
+
+func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
+	var acls []*AclCreation
+	acls = append(acls, &AclCreation{resource, acl})
+	request := &CreateAclsRequest{AclCreations: acls}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	_, err = b.CreateAcls(request)
+	return err
+}
+
+func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
+
+	request := &DescribeAclsRequest{AclFilter: filter}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var lAcls []ResourceAcls
+	for _, rAcl := range rsp.ResourceAcls {
+		lAcls = append(lAcls, *rAcl)
+	}
+	return lAcls, nil
+}
+
+func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
+	var filters []*AclFilter
+	filters = append(filters, &filter)
+	request := &DeleteAclsRequest{Filters: filters}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DeleteAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var mAcls []MatchingAcl
+	for _, fr := range rsp.FilterResponses {
+		for _, mACL := range fr.MatchingAcls {
+			mAcls = append(mAcls, *mACL)
+		}
+
+	}
+	return mAcls, nil
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
new file mode 100644
index 0000000..48c44ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_request.go
@@ -0,0 +1,120 @@
+package sarama
+
+type AlterConfigsRequest struct {
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (acr *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(acr.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range acr.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(acr.ValidateOnly)
+	return nil
+}
+
+func (acr *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range acr.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		acr.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	acr.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (ac *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(ac.Type))
+
+	if err := pe.putString(ac.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(ac.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range ac.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ac *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	ac.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	ac.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		ac.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if ac.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (acr *AlterConfigsRequest) key() int16 {
+	return 33
+}
+
+func (acr *AlterConfigsRequest) version() int16 {
+	return 0
+}
+
+func (acr *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
new file mode 100644
index 0000000..29b09e1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/alter_configs_response.go
@@ -0,0 +1,95 @@
+package sarama
+
+import "time"
+
+type AlterConfigsResponse struct {
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (ct *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(ct.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(ct.Resources)); err != nil {
+		return err
+	}
+
+	for i := range ct.Resources {
+		pe.putInt16(ct.Resources[i].ErrorCode)
+		err := pe.putString(ct.Resources[i].ErrorMsg)
+		if err != nil {
+			return err
+		}
+		pe.putInt8(int8(ct.Resources[i].Type))
+		err = pe.putString(ct.Resources[i].Name)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (acr *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	acr.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	acr.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range acr.Resources {
+		acr.Resources[i] = new(AlterConfigsResourceResponse)
+
+		errCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorCode = errCode
+
+		e, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].ErrorMsg = e
+
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Type = ConfigResourceType(t)
+
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		acr.Resources[i].Name = name
+	}
+
+	return nil
+}
+
+func (r *AlterConfigsResponse) key() int16 {
+	return 33
+}
+
+func (r *AlterConfigsResponse) version() int16 {
+	return 0
+}
+
+func (r *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 0000000..ab65f01
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ApiVersionsRequest struct {
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 0000000..23bc326
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+type ApiVersionsResponseBlock struct {
+	ApiKey     int16
+	MinVersion int16
+	MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+	pe.putInt16(b.ApiKey)
+	pe.putInt16(b.MinVersion)
+	pe.putInt16(b.MaxVersion)
+	return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+	var err error
+
+	if b.ApiKey, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MinVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.MaxVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type ApiVersionsResponse struct {
+	Err         KError
+	ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+		return err
+	}
+	for _, apiVersion := range r.ApiVersions {
+		if err := apiVersion.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	numBlocks, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+	for i := 0; i < numBlocks; i++ {
+		block := new(ApiVersionsResponseBlock)
+		if err := block.decode(pd); err != nil {
+			return err
+		}
+		r.ApiVersions[i] = block
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+	return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+	return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 0000000..89a0c70
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,1052 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
+	AsyncClose()
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+
+	// Input is the channel on which the user writes messages that they
+	// wish to send.
+	Input() chan<- *ProducerMessage
+
+	// Successes is the success output channel back to the user when Return.Successes is
+	// enabled. If Return.Successes is true, you MUST read from this channel or the
+	// Producer will deadlock. It is suggested that you send and read messages
+	// together in a single select statement.
+	Successes() <-chan *ProducerMessage
+
+	// Errors is the error output channel back to the user. You MUST read from this
+	// channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+	Errors() <-chan *ProducerError
+}
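+
+// A hedged sketch of draining both channels when Return.Successes and
+// Return.Errors are enabled (the goroutine structure is illustrative only):
+//
+//	go func() {
+//		for msg := range producer.Successes() {
+//			Logger.Printf("delivered %s/%d@%d", msg.Topic, msg.Partition, msg.Offset)
+//		}
+//	}()
+//	go func() {
+//		for err := range producer.Errors() {
+//			Logger.Println(err)
+//		}
+//	}()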
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+	producerID      int64
+	producerEpoch   int16
+	sequenceNumbers map[string]int32
+	mutex           sync.Mutex
+}
+
+const (
+	noProducerID    = -1
+	noProducerEpoch = -1
+)
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) int32 {
+	key := fmt.Sprintf("%s-%d", topic, partition)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	sequence := t.sequenceNumbers[key]
+	t.sequenceNumbers[key] = sequence + 1
+	return sequence
+}
+
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+	txnmgr := &transactionManager{
+		producerID:    noProducerID,
+		producerEpoch: noProducerEpoch,
+	}
+
+	if conf.Producer.Idempotent {
+		initProducerIDResponse, err := client.InitProducerID()
+		if err != nil {
+			return nil, err
+		}
+		txnmgr.producerID = initProducerIDResponse.ProducerID
+		txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
+		txnmgr.sequenceNumbers = make(map[string]int32)
+		txnmgr.mutex = sync.Mutex{}
+
+		Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
+	}
+
+	return txnmgr, nil
+}
+
+type asyncProducer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]*brokerProducer
+	brokerRefs map[*brokerProducer]int
+	brokerLock sync.Mutex
+
+	txnmgr *transactionManager
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	p.(*asyncProducer).ownClient = true
+	return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	txnmgr, err := newTransactionManager(client.Config(), client)
+	if err != nil {
+		return nil, err
+	}
+
+	p := &asyncProducer{
+		client:     client,
+		conf:       client.Config(),
+		errors:     make(chan *ProducerError),
+		input:      make(chan *ProducerMessage),
+		successes:  make(chan *ProducerMessage),
+		retries:    make(chan *ProducerMessage),
+		brokers:    make(map[*Broker]*brokerProducer),
+		brokerRefs: make(map[*brokerProducer]int),
+		txnmgr:     txnmgr,
+	}
+
+	// launch our singleton dispatchers
+	go withRecover(p.dispatcher)
+	go withRecover(p.retryHandler)
+
+	return p, nil
+}
+
+type flagSet int8
+
+const (
+	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+	fin                          // final message from partitionProducer to brokerProducer and back
+	shutdown                     // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+	Topic string // The Kafka topic for this message.
+	// The partitioning key for this message. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Key Encoder
+	// The actual message to store in Kafka. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Value Encoder
+
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
+	// This field is used to hold arbitrary data you wish to include so it
+	// will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+	// pass-through data.
+	Metadata interface{}
+
+	// Below this point are filled in by the producer as the message is processed
+
+	// Offset is the offset of the message stored on the broker. This is only
+	// guaranteed to be defined if the message was successfully delivered and
+	// RequiredAcks is not NoResponse.
+	Offset int64
+	// Partition is the partition that the message was sent to. This is only
+	// guaranteed to be defined if the message was successfully delivered.
+	Partition int32
+	// Timestamp is the timestamp assigned to the message by the broker. This
+	// is only guaranteed to be defined if the message was successfully
+	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
+	// least version 0.10.0.
+	Timestamp time.Time
+
+	retries        int
+	flags          flagSet
+	expectation    chan *ProducerError
+	sequenceNumber int32
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
+	if m.Key != nil {
+		size += m.Key.Length()
+	}
+	if m.Value != nil {
+		size += m.Value.Length()
+	}
+	return size
+}
+
+func (m *ProducerMessage) clear() {
+	m.flags = 0
+	m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+	Msg *ProducerMessage
+	Err error
+}
+
+func (pe ProducerError) Error() string {
+	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+	return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+	return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+	return p.input
+}
+
+func (p *asyncProducer) Close() error {
+	p.AsyncClose()
+
+	if p.conf.Producer.Return.Successes {
+		go withRecover(func() {
+			for range p.successes {
+			}
+		})
+	}
+
+	var errors ProducerErrors
+	if p.conf.Producer.Return.Errors {
+		for event := range p.errors {
+			errors = append(errors, event)
+		}
+	} else {
+		<-p.errors
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+	go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+	handlers := make(map[string]chan<- *ProducerMessage)
+	shuttingDown := false
+
+	for msg := range p.input {
+		if msg == nil {
+			Logger.Println("Something tried to send a nil message, it was ignored.")
+			continue
+		}
+
+		if msg.flags&shutdown != 0 {
+			shuttingDown = true
+			p.inFlight.Done()
+			continue
+		} else if msg.retries == 0 {
+			if shuttingDown {
+				// we can't just call returnError here because that decrements the wait group,
+				// which hasn't been incremented yet for this message, and shouldn't be
+				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+				if p.conf.Producer.Return.Errors {
+					p.errors <- pErr
+				} else {
+					Logger.Println(pErr)
+				}
+				continue
+			}
+			p.inFlight.Add(1)
+		}
+
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		} else if msg.Headers != nil {
+			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+			continue
+		}
+		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
+			p.returnError(msg, ErrMessageSizeTooLarge)
+			continue
+		}
+
+		handler := handlers[msg.Topic]
+		if handler == nil {
+			handler = p.newTopicProducer(msg.Topic)
+			handlers[msg.Topic] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range handlers {
+		close(handler)
+	}
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+	parent *asyncProducer
+	topic  string
+	input  <-chan *ProducerMessage
+
+	breaker     *breaker.Breaker
+	handlers    map[int32]chan<- *ProducerMessage
+	partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	tp := &topicProducer{
+		parent:      p,
+		topic:       topic,
+		input:       input,
+		breaker:     breaker.New(3, 1, 10*time.Second),
+		handlers:    make(map[int32]chan<- *ProducerMessage),
+		partitioner: p.conf.Producer.Partitioner(topic),
+	}
+	go withRecover(tp.dispatch)
+	return input
+}
+
+func (tp *topicProducer) dispatch() {
+	for msg := range tp.input {
+		if msg.retries == 0 {
+			if err := tp.partitionMessage(msg); err != nil {
+				tp.parent.returnError(msg, err)
+				continue
+			}
+		}
+		// All messages being retried (sent or not) have already had their retry count updated
+		if tp.parent.conf.Producer.Idempotent && msg.retries == 0 {
+			msg.sequenceNumber = tp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+		}
+
+		handler := tp.handlers[msg.Partition]
+		if handler == nil {
+			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+			tp.handlers[msg.Partition] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range tp.handlers {
+		close(handler)
+	}
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+	var partitions []int32
+
+	err := tp.breaker.Run(func() (err error) {
+		var requiresConsistency = false
+		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+			requiresConsistency = ep.MessageRequiresConsistency(msg)
+		} else {
+			requiresConsistency = tp.partitioner.RequiresConsistency()
+		}
+
+		if requiresConsistency {
+			partitions, err = tp.parent.client.Partitions(msg.Topic)
+		} else {
+			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+		}
+		return
+	})
+
+	if err != nil {
+		return err
+	}
+
+	numPartitions := int32(len(partitions))
+
+	if numPartitions == 0 {
+		return ErrLeaderNotAvailable
+	}
+
+	choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+	if err != nil {
+		return err
+	} else if choice < 0 || choice >= numPartitions {
+		return ErrInvalidPartition
+	}
+
+	msg.Partition = partitions[choice]
+
+	return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+	parent    *asyncProducer
+	topic     string
+	partition int32
+	input     <-chan *ProducerMessage
+
+	leader         *Broker
+	breaker        *breaker.Breaker
+	brokerProducer *brokerProducer
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush).
+	highWatermark int
+	retryState    []partitionRetryState
+}
+
+type partitionRetryState struct {
+	buf          []*ProducerMessage
+	expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	pp := &partitionProducer{
+		parent:    p,
+		topic:     topic,
+		partition: partition,
+		input:     input,
+
+		breaker:    breaker.New(3, 1, 10*time.Second),
+		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+	}
+	go withRecover(pp.dispatch)
+	return input
+}
+
+func (pp *partitionProducer) dispatch() {
+	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+	// on the first message
+	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+	if pp.leader != nil {
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+	}
+
+	for msg := range pp.input {
+		if msg.retries > pp.highWatermark {
+			// a new, higher, retry level; handle it and then back off
+			pp.newHighWatermark(msg.retries)
+			time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+		} else if pp.highWatermark > 0 {
+			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+			if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+				if msg.flags&fin == fin {
+					pp.retryState[msg.retries].expectChaser = false
+					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				} else {
+					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+				}
+				continue
+			} else if msg.flags&fin == fin {
+				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+				// meaning this retry level is done and we can go down (at least) one level and flush that
+				pp.retryState[pp.highWatermark].expectChaser = false
+				pp.flushRetryBuffers()
+				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				continue
+			}
+		}
+
+		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+		// without breaking any of our ordering guarantees
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnError(msg, err)
+				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+				continue
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		pp.brokerProducer.input <- msg
+	}
+
+	if pp.brokerProducer != nil {
+		pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+	}
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+	pp.highWatermark = hwm
+
+	// send off a fin so that we know when everything "in between" has made it
+	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+	pp.retryState[pp.highWatermark].expectChaser = true
+	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+	// a new HWM means that our current broker selection is out of date
+	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+	pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+	for {
+		pp.highWatermark--
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+				goto flushDone
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		for _, msg := range pp.retryState[pp.highWatermark].buf {
+			pp.brokerProducer.input <- msg
+		}
+
+	flushDone:
+		pp.retryState[pp.highWatermark].buf = nil
+		if pp.retryState[pp.highWatermark].expectChaser {
+			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+			break
+		} else if pp.highWatermark == 0 {
+			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+			break
+		}
+	}
+}
+
+func (pp *partitionProducer) updateLeader() error {
+	return pp.breaker.Run(func() (err error) {
+		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+			return err
+		}
+
+		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+			return err
+		}
+
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+		return nil
+	})
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+	var (
+		input     = make(chan *ProducerMessage)
+		bridge    = make(chan *produceSet)
+		responses = make(chan *brokerProducerResponse)
+	)
+
+	bp := &brokerProducer{
+		parent:         p,
+		broker:         broker,
+		input:          input,
+		output:         bridge,
+		responses:      responses,
+		buffer:         newProduceSet(p),
+		currentRetries: make(map[string]map[int32]error),
+	}
+	go withRecover(bp.run)
+
+	// minimal bridge to make the network response `select`able
+	go withRecover(func() {
+		for set := range bridge {
+			request := set.buildRequest()
+
+			response, err := broker.Produce(request)
+
+			responses <- &brokerProducerResponse{
+				set: set,
+				err: err,
+				res: response,
+			}
+		}
+		close(responses)
+	})
+
+	return bp
+}
+
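+// brokerProducerResponse carries the outcome of a single Produce request from
+// the network bridge goroutine back to the brokerProducer's run loop.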
+type brokerProducerResponse struct {
+	set *produceSet
+	err error
+	res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+	parent *asyncProducer
+	broker *Broker
+
+	input     chan *ProducerMessage
+	output    chan<- *produceSet
+	responses <-chan *brokerProducerResponse
+
+	buffer     *produceSet
+	timer      <-chan time.Time
+	timerFired bool
+
+	closing        error
+	currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+	var output chan<- *produceSet
+	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+	for {
+		select {
+		case msg := <-bp.input:
+			if msg == nil {
+				bp.shutdown()
+				return
+			}
+
+			if msg.flags&syn == syn {
+				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+					bp.broker.ID(), msg.Topic, msg.Partition)
+				if bp.currentRetries[msg.Topic] == nil {
+					bp.currentRetries[msg.Topic] = make(map[int32]error)
+				}
+				bp.currentRetries[msg.Topic][msg.Partition] = nil
+				bp.parent.inFlight.Done()
+				continue
+			}
+
+			if reason := bp.needsRetry(msg); reason != nil {
+				bp.parent.retryMessage(msg, reason)
+
+				if bp.closing == nil && msg.flags&fin == fin {
+					// we were retrying this partition but we can start processing again
+					delete(bp.currentRetries[msg.Topic], msg.Partition)
+					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+						bp.broker.ID(), msg.Topic, msg.Partition)
+				}
+
+				continue
+			}
+
+			if bp.buffer.wouldOverflow(msg) {
+				if err := bp.waitForSpace(msg); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+
+			if err := bp.buffer.add(msg); err != nil {
+				bp.parent.returnError(msg, err)
+				continue
+			}
+
+			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+			}
+		case <-bp.timer:
+			bp.timerFired = true
+		case output <- bp.buffer:
+			bp.rollOver()
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		}
+
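+		// a nil channel is never ready in a select, so assigning nil here
+		// disables the `case output <- bp.buffer` branch above until the
+		// buffer is actually worth flushing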
+		if bp.timerFired || bp.buffer.readyToFlush() {
+			output = bp.output
+		} else {
+			output = nil
+		}
+	}
+}
+
+func (bp *brokerProducer) shutdown() {
+	for !bp.buffer.empty() {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+		}
+	}
+	close(bp.output)
+	for response := range bp.responses {
+		bp.handleResponse(response)
+	}
+
+	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+	if bp.closing != nil {
+		return bp.closing
+	}
+
+	return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+	for {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+			// handling a response can change our state, so re-check some things
+			if reason := bp.needsRetry(msg); reason != nil {
+				return reason
+			} else if !bp.buffer.wouldOverflow(msg) {
+				return nil
+			}
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+			return nil
+		}
+	}
+}
+
+func (bp *brokerProducer) rollOver() {
+	bp.timer = nil
+	bp.timerFired = false
+	bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+	if response.err != nil {
+		bp.handleError(response.set, response.err)
+	} else {
+		bp.handleSuccess(response.set, response.res)
+	}
+
+	if bp.buffer.empty() {
+		bp.rollOver() // this can happen if the response invalidated our buffer
+	}
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+	// we iterate through the blocks in the request set, not the response, so that we notice
+	// if the response is missing a block completely
+	var retryTopics []string
+	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+		if response == nil {
+			// this only happens when RequiredAcks is NoResponse, so we have to assume success
+			bp.parent.returnSuccesses(pSet.msgs)
+			return
+		}
+
+		block := response.GetBlock(topic, partition)
+		if block == nil {
+			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+			return
+		}
+
+		switch block.Err {
+		// Success
+		case ErrNoError:
+			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+				for _, msg := range pSet.msgs {
+					msg.Timestamp = block.Timestamp
+				}
+			}
+			for i, msg := range pSet.msgs {
+				msg.Offset = block.Offset + int64(i)
+			}
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Duplicate
+		case ErrDuplicateSequenceNumber:
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Retriable errors
+		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+			retryTopics = append(retryTopics, topic)
+		// Other non-retriable errors
+		default:
+			bp.parent.returnErrors(pSet.msgs, block.Err)
+		}
+	})
+
+	if len(retryTopics) > 0 {
+		if bp.parent.conf.Producer.Idempotent {
+			err := bp.parent.client.RefreshMetadata(retryTopics...)
+			if err != nil {
+				Logger.Printf("Failed refreshing metadata because of %v\n", err)
+			}
+		}
+
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			block := response.GetBlock(topic, partition)
+			if block == nil {
+				// handled in the previous "eachPartition" loop
+				return
+			}
+
+			switch block.Err {
+			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+					bp.broker.ID(), topic, partition, block.Err)
+				if bp.currentRetries[topic] == nil {
+					bp.currentRetries[topic] = make(map[int32]error)
+				}
+				bp.currentRetries[topic][partition] = block.Err
+				if bp.parent.conf.Producer.Idempotent {
+					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+				} else {
+					bp.parent.retryMessages(pSet.msgs, block.Err)
+				}
+				// dropping the following messages has the side effect of incrementing their retry count
+				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+			}
+		})
+	}
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+	produceSet := newProduceSet(p)
+	produceSet.msgs[topic] = make(map[int32]*partitionSet)
+	produceSet.msgs[topic][partition] = pSet
+	produceSet.bufferBytes += pSet.bufferBytes
+	produceSet.bufferCount += len(pSet.msgs)
+	for _, msg := range pSet.msgs {
+		if msg.retries >= p.conf.Producer.Retry.Max {
+			p.returnError(msg, kerr)
+			return
+		}
+		msg.retries++
+	}
+
+	// it's expected that a metadata refresh has been requested prior to calling retryBatch
+	leader, err := p.client.Leader(topic, partition)
+	if err != nil {
+		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
+		for _, msg := range pSet.msgs {
+			p.returnError(msg, kerr)
+		}
+		return
+	}
+	bp := p.getBrokerProducer(leader)
+	bp.output <- produceSet
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	switch err.(type) {
+	case PacketEncodingError:
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.returnErrors(pSet.msgs, err)
+		})
+	default:
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				buf.Remove()
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+	}
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
+	p.inFlight.Wait()
+
+	if p.ownClient {
+		err := p.client.Close()
+		if err != nil {
+			Logger.Println("producer/shutdown failed to close the embedded client:", err)
+		}
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp.input)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
new file mode 100644
index 0000000..e78988d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/balance_strategy.go
@@ -0,0 +1,129 @@
+package sarama
+
+import (
+	"math"
+	"sort"
+)
+
+// BalanceStrategyPlan is the result of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a topic with a number of partitions to a member.
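+// For example, Add("m1", "t", 0, 1) records {"m1": {"t": [0, 1]}} in the plan.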
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+	if len(partitions) == 0 {
+		return
+	}
+	if _, ok := p[memberID]; !ok {
+		p[memberID] = make(map[string][]int32, 1)
+	}
+	p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group.
+type BalanceStrategy interface {
+	// Name uniquely identifies the strategy.
+	Name() string
+
+	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+	// and returns a distribution plan.
+	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+}
+
+// --------------------------------------------------------------------
+
+// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
+// Example with one topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 1, 2]}
+//   M2: {T: [3, 4, 5]}
+var BalanceStrategyRange = &balanceStrategy{
+	name: "range",
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		step := float64(len(partitions)) / float64(len(memberIDs))
+
+		for i, memberID := range memberIDs {
+			pos := float64(i)
+			min := int(math.Floor(pos*step + 0.5))
+			max := int(math.Floor((pos+1)*step + 0.5))
+			plan.Add(memberID, topic, partitions[min:max]...)
+		}
+	},
+}
+
+// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//   M1: {T: [0, 2, 4]}
+//   M2: {T: [1, 3, 5]}
+var BalanceStrategyRoundRobin = &balanceStrategy{
+	name: "roundrobin",
+	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+		for i, part := range partitions {
+			memberID := memberIDs[i%len(memberIDs)]
+			plan.Add(memberID, topic, part)
+		}
+	},
+}
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+	name   string
+	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// Build members by topic map
+	mbt := make(map[string][]string)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			mbt[topic] = append(mbt[topic], memberID)
+		}
+	}
+
+	// Sort members for each topic
+	for topic, memberIDs := range mbt {
+		sort.Sort(&balanceStrategySortable{
+			topic:     topic,
+			memberIDs: memberIDs,
+		})
+	}
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(members))
+	for topic, memberIDs := range mbt {
+		s.coreFn(plan, memberIDs, topic, topics[topic])
+	}
+	return plan, nil
+}
+
+type balanceStrategySortable struct {
+	topic     string
+	memberIDs []string
+}
+
+func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
+func (p balanceStrategySortable) Swap(i, j int) {
+	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
+}
+func (p balanceStrategySortable) Less(i, j int) bool {
+	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
+}
+
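+// balanceStrategyHashValue is essentially a 32-bit FNV-1a hash (offset basis
+// 2166136261, prime 16777619) over the concatenation of the given strings,
+// giving members a deterministic, topic-dependent ordering.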
+func balanceStrategyHashValue(vv ...string) uint32 {
+	h := uint32(2166136261)
+	for _, s := range vv {
+		for _, c := range s {
+			h ^= uint32(c)
+			h *= 16777619
+		}
+	}
+	return h
+}
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 0000000..6a33b80
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,895 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+	id   int32
+	addr string
+	rack *string
+
+	conf          *Config
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
+	opened        int32
+
+	responses chan responsePromise
+	done      chan bool
+
+	incomingByteRate       metrics.Meter
+	requestRate            metrics.Meter
+	requestSize            metrics.Histogram
+	requestLatency         metrics.Histogram
+	outgoingByteRate       metrics.Meter
+	responseRate           metrics.Meter
+	responseSize           metrics.Histogram
+	brokerIncomingByteRate metrics.Meter
+	brokerRequestRate      metrics.Meter
+	brokerRequestSize      metrics.Histogram
+	brokerRequestLatency   metrics.Histogram
+	brokerOutgoingByteRate metrics.Meter
+	brokerResponseRate     metrics.Meter
+	brokerResponseSize     metrics.Histogram
+}
+
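+// responsePromise pairs an in-flight request with the channels on which the
+// raw response bytes (or an error) will eventually be delivered.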
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	packets       chan []byte
+	errors        chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
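+//
+// For example, a fully synchronous open might look like this (the address is
+// assumed for illustration):
+//
+//   b := NewBroker("localhost:9092")
+//   if err := b.Open(nil); err != nil {
+//       // configuration error or already connected
+//   }
+//   if ok, err := b.Connected(); !ok {
+//       // the background dial failed; err carries the cause
+//   }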
+func (b *Broker) Open(conf *Config) error {
+	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+		return ErrAlreadyConnected
+	}
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	err := conf.Validate()
+	if err != nil {
+		return err
+	}
+
+	b.lock.Lock()
+
+	go withRecover(func() {
+		defer b.lock.Unlock()
+
+		dialer := net.Dialer{
+			Timeout:   conf.Net.DialTimeout,
+			KeepAlive: conf.Net.KeepAlive,
+			LocalAddr: conf.Net.LocalAddr,
+		}
+
+		if conf.Net.TLS.Enable {
+			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+		} else {
+			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+		}
+		if b.connErr != nil {
+			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+			b.conn = nil
+			atomic.StoreInt32(&b.opened, 0)
+			return
+		}
+		b.conn = newBufConn(b.conn)
+
+		b.conf = conf
+
+		// Create or reuse the global metrics shared between brokers
+		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
+		// the same id (-1) and are already exposed through the global metrics above
+		if b.id >= 0 {
+			b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
+			b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
+			b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
+			b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
+			b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
+			b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
+			b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
+		}
+
+		if conf.Net.SASL.Enable {
+			b.connErr = b.sendAndReceiveSASLPlainAuth()
+			if b.connErr != nil {
+				err = b.conn.Close()
+				if err == nil {
+					Logger.Printf("Closed connection to broker %s\n", b.addr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+				}
+				b.conn = nil
+				atomic.StoreInt32(&b.opened, 0)
+				return
+			}
+		}
+
+		b.done = make(chan bool)
+		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+		if b.id >= 0 {
+			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+		} else {
+			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+		}
+		go withRecover(b.responseReceiver)
+	})
+
+	return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.conn != nil, b.connErr
+}
+
+func (b *Broker) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return ErrNotConnected
+	}
+
+	close(b.responses)
+	<-b.done
+
+	err := b.conn.Close()
+
+	b.conn = nil
+	b.connErr = nil
+	b.done = nil
+	b.responses = nil
+
+	if b.id >= 0 {
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
+		b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
+	}
+
+	if err == nil {
+		Logger.Printf("Closed connection to broker %s\n", b.addr)
+	} else {
+		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+	}
+
+	atomic.StoreInt32(&b.opened, 0)
+
+	return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+	return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known.  The returned value corresponds to the
+// broker's broker.rack configuration setting.  Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+	if b.rack == nil {
+		return ""
+	}
+	return *b.rack
+}
+
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var response *ProduceResponse
+	var err error
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+
+	err := b.sendAndReceive(request, response)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+	response := new(DeleteGroupsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		if b.connErr != nil {
+			return nil, b.connErr
+		}
+		return nil, ErrNotConnected
+	}
+
+	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return nil, err
+	}
+
+	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		return nil, err
+	}
+
+	requestTime := time.Now()
+	bytes, err := b.conn.Write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		return nil, err
+	}
+	b.correlationID++
+
+	if !promiseResponse {
+		// Record request latency without the response
+		b.updateRequestLatencyMetrics(time.Since(requestTime))
+		return nil, nil
+	}
+
+	promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+	b.responses <- promise
+
+	return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+	promise, err := b.send(req, res != nil)
+
+	if err != nil {
+		return err
+	}
+
+	if promise == nil {
+		return nil
+	}
+
+	select {
+	case buf := <-promise.packets:
+		return versionedDecode(buf, res, req.version())
+	case err = <-promise.errors:
+		return err
+	}
+}
+
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
+	b.id, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	host, err := pd.getString()
+	if err != nil {
+		return err
+	}
+
+	port, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		b.rack, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+	if _, _, err := net.SplitHostPort(b.addr); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
+	host, portstr, err := net.SplitHostPort(b.addr)
+	if err != nil {
+		return err
+	}
+	port, err := strconv.Atoi(portstr)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(b.id)
+
+	err = pe.putString(host)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(int32(port))
+
+	if version >= 1 {
+		err = pe.putNullableString(b.rack)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *Broker) responseReceiver() {
+	var dead error
+	header := make([]byte, 8)
+	for response := range b.responses {
+		if dead != nil {
+			response.errors <- dead
+			continue
+		}
+
+		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
+		if err != nil {
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		bytesReadHeader, err := io.ReadFull(b.conn, header)
+		requestLatency := time.Since(response.requestTime)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		decodedHeader := responseHeader{}
+		err = decode(header, &decodedHeader)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			response.errors <- err
+			continue
+		}
+		if decodedHeader.correlationID != response.correlationID {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			// TODO if decoded ID < cur ID, discard until we catch up
+			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+			response.errors <- dead
+			continue
+		}
+
+		buf := make([]byte, decodedHeader.length-4)
+		bytesReadBody, err := io.ReadFull(b.conn, buf)
+		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+		if err != nil {
+			dead = err
+			response.errors <- err
+			continue
+		}
+
+		response.packets <- buf
+	}
+	close(b.done)
+}
+
+func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
+	rb := &SaslHandshakeRequest{"PLAIN"}
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.conf.MetricRegistry)
+	if err != nil {
+		return err
+	}
+
+	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		return err
+	}
+
+	requestTime := time.Now()
+	bytes, err := b.conn.Write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+		return err
+	}
+	b.correlationID++
+	//wait for the response
+	header := make([]byte, 8) // response header
+	_, err = io.ReadFull(b.conn, header)
+	if err != nil {
+		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+		return err
+	}
+	length := binary.BigEndian.Uint32(header[:4])
+	payload := make([]byte, length-4)
+	n, err := io.ReadFull(b.conn, payload)
+	if err != nil {
+		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+		return err
+	}
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &SaslHandshakeResponse{}
+	err = versionedDecode(payload, res, 0)
+	if err != nil {
+		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+		return err
+	}
+	if res.Err != ErrNoError {
+		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+		return res.Err
+	}
+	Logger.Print("Successful SASL handshake")
+	return nil
+}
+
+// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
+// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
+//   authcid   = 1*SAFE ; MUST accept up to 255 octets
+//   authzid   = 1*SAFE ; MUST accept up to 255 octets
+//   passwd    = 1*SAFE ; MUST accept up to 255 octets
+//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
+//
+//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
+//                  ;; any UTF-8 encoded Unicode character except NUL
+//
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
+// of responding to bad credentials, but that's how it's being done today.
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+	if b.conf.Net.SASL.Handshake {
+		handshakeErr := b.sendAndReceiveSASLPlainHandshake()
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+			return handshakeErr
+		}
+	}
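+
+	// e.g. for user "alice" with password "secret" (illustrative values), the
+	// bytes written below are "\x00alice\x00secret" preceded by the payload
+	// length as a 4-byte big-endian integer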
+	length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) //4 byte length header + auth data
+	binary.BigEndian.PutUint32(authBytes, uint32(length))
+	copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+	err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+	if err != nil {
+		Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	requestTime := time.Now()
+	bytesWritten, err := b.conn.Write(authBytes)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	if err != nil {
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	header := make([]byte, 4)
+	n, err := io.ReadFull(b.conn, header)
+	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+	// If the credentials are valid, we would get a 4 byte response filled with null characters.
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+	b.updateRequestLatencyMetrics(requestLatency)
+	b.responseRate.Mark(1)
+	if b.brokerResponseRate != nil {
+		b.brokerResponseRate.Mark(1)
+	}
+	responseSize := int64(bytes)
+	b.incomingByteRate.Mark(responseSize)
+	if b.brokerIncomingByteRate != nil {
+		b.brokerIncomingByteRate.Mark(responseSize)
+	}
+	b.responseSize.Update(responseSize)
+	if b.brokerResponseSize != nil {
+		b.brokerResponseSize.Update(responseSize)
+	}
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+	requestLatencyInMs := int64(requestLatency / time.Millisecond)
+	b.requestLatency.Update(requestLatencyInMs)
+	if b.brokerRequestLatency != nil {
+		b.brokerRequestLatency.Update(requestLatencyInMs)
+	}
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+	b.requestRate.Mark(1)
+	if b.brokerRequestRate != nil {
+		b.brokerRequestRate.Mark(1)
+	}
+	requestSize := int64(bytes)
+	b.outgoingByteRate.Mark(requestSize)
+	if b.brokerOutgoingByteRate != nil {
+		b.brokerOutgoingByteRate.Mark(requestSize)
+	}
+	b.requestSize.Update(requestSize)
+	if b.brokerRequestSize != nil {
+		b.brokerRequestSize.Update(requestSize)
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 0000000..79be5ce
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,899 @@
+package sarama
+
+import (
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default of one client per producer/consumer.
+type Client interface {
+	// Config returns the Config struct of the client. This struct should not be
+	// altered after it has been created.
+	Config() *Config
+
+	// Controller returns the cluster controller broker. Requires Kafka 0.10 or higher.
+	Controller() (*Broker, error)
+
+	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
+	Brokers() []*Broker
+
+	// Topics returns the set of available topics as retrieved from cluster metadata.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	Partitions(topic string) ([]int32, error)
+
+	// WritablePartitions returns the sorted list of all writable partition IDs for
+	// the given topic, where "writable" means "having a valid leader accepting
+	// writes".
+	WritablePartitions(topic string) ([]int32, error)
+
+	// Leader returns the broker object that is the leader of the current
+	// topic/partition, as determined by querying the cluster metadata.
+	Leader(topic string, partitionID int32) (*Broker, error)
+
+	// Replicas returns the set of all replica IDs for the given partition.
+	Replicas(topic string, partitionID int32) ([]int32, error)
+
+	// InSyncReplicas returns the set of all in-sync replica IDs for the given
+	// partition. In-sync replicas are replicas which are fully caught up with
+	// the partition leader.
+	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
+	// available metadata for those topics. If no topics are provided, it will refresh
+	// metadata for all topics.
+	RefreshMetadata(topics ...string) error
+
+	// GetOffset queries the cluster to get the most recent available offset at the
+	// given time (in milliseconds) on the topic/partition combination.
+	// Time should be OffsetOldest for the earliest available offset,
+	// OffsetNewest for the offset of the message that will be produced next, or a
+	// timestamp in milliseconds since the Unix epoch.
+	GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.8.2 and higher.
+	Coordinator(consumerGroup string) (*Broker, error)
+
+	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+	// in local cache. This function only works on Kafka 0.8.2 and higher.
+	RefreshCoordinator(consumerGroup string) error
+
+	// InitProducerID retrieves information required for Idempotent Producer
+	InitProducerID() (*InitProducerIDResponse, error)
+
+	// Close shuts down all broker connections managed by this client. It is required
+	// to call this function before a client object passes out of scope, as it will
+	// otherwise leak memory. You must close any Producers or Consumers using a client
+	// before you close the client.
+	Close() error
+
+	// Closed returns true if the client has already had Close called on it
+	Closed() bool
+}
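+
+// A minimal lifecycle sketch (broker address and topic name are assumed for
+// illustration):
+//
+//   client, err := NewClient([]string{"localhost:9092"}, nil)
+//   if err != nil {
+//       panic(err)
+//   }
+//   defer client.Close()
+//   partitions, err := client.Partitions("my-topic")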
+
+const (
+	// OffsetNewest stands for the log head offset, i.e. the offset that will be
+	// assigned to the next message that will be produced to the partition. You
+	// can send this to a client's GetOffset method to get this offset, or when
+	// calling ConsumePartition to start consuming new messages.
+	OffsetNewest int64 = -1
+	// OffsetOldest stands for the oldest offset available on the broker for a
+	// partition. You can send this to a client's GetOffset method to get this
+	// offset, or when calling ConsumePartition to start consuming from the
+	// oldest offset that is still available on the broker.
+	OffsetOldest int64 = -2
+)
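+
+// For example, client.GetOffset("my-topic", 0, OffsetOldest) returns the
+// earliest offset still available on partition 0 (topic name assumed for
+// illustration).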
+
+type client struct {
+	conf           *Config
+	closer, closed chan none // for shutting down background metadata updater
+
+	// the broker addresses given to us through the constructor are not guaranteed to be returned in
+	// the cluster metadata (I *think* it only returns brokers that are currently leading partitions?),
+	// so we store them separately.
+	seedBrokers []*Broker
+	deadSeeds   []*Broker
+
+	controllerID   int32                                   // cluster controller broker id
+	brokers        map[int32]*Broker                       // maps broker ids to brokers
+	metadata       map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+	metadataTopics map[string]none                         // topics that need to collect metadata
+	coordinators   map[string]int32                        // Maps consumer group names to coordinating broker IDs
+
+	// If the number of partitions is large, we can get some churn calling cachedPartitions,
+	// so the result is cached.  It is important to update this value whenever metadata is changed
+	cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+	lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+	Logger.Println("Initializing new client")
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+
+	if len(addrs) < 1 {
+		return nil, ConfigurationError("You must provide at least one broker address")
+	}
+
+	client := &client{
+		conf:                    conf,
+		closer:                  make(chan none),
+		closed:                  make(chan none),
+		brokers:                 make(map[int32]*Broker),
+		metadata:                make(map[string]map[int32]*PartitionMetadata),
+		metadataTopics:          make(map[string]none),
+		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+		coordinators:            make(map[string]int32),
+	}
+
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+
+	if conf.Metadata.Full {
+		// do an initial fetch of all cluster metadata by specifying an empty list of topics
+		err := client.RefreshMetadata()
+		switch err {
+		case nil:
+			break
+		case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
+			Logger.Println(err)
+		default:
+			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+			_ = client.Close()
+			return nil, err
+		}
+	}
+	go withRecover(client.backgroundMetadataUpdater)
+
+	Logger.Println("Successfully initialized new client")
+
+	return client, nil
+}
+
+func (client *client) Config() *Config {
+	return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	brokers := make([]*Broker, 0, len(client.brokers))
+	for _, broker := range client.brokers {
+		brokers = append(brokers, broker)
+	}
+	return brokers
+}
+
+func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
+	var err error
+	for broker := client.any(); broker != nil; broker = client.any() {
+
+		req := &InitProducerIDRequest{}
+
+		response, err := broker.InitProducerID(req)
+		switch err.(type) {
+		case nil:
+			return response, nil
+		default:
+			// some error, remove that broker and try again
+			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+	return nil, err
+}
+
+func (client *client) Close() error {
+	if client.Closed() {
+		// Chances are this is being called from a defer() and the error will go unobserved
+		// so we go ahead and log the event in this case.
+		Logger.Printf("Close() called on already closed client")
+		return ErrClosedClient
+	}
+
+	// shutdown and wait for the background thread before we take the lock, to avoid races
+	close(client.closer)
+	<-client.closed
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	Logger.Println("Closing Client")
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	client.brokers = nil
+	client.metadata = nil
+	client.metadataTopics = nil
+
+	return nil
+}
+
+func (client *client) Closed() bool {
+	return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadata))
+	for topic := range client.metadata {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) MetadataTopics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadataTopics))
+	for topic := range client.metadataTopics {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, allPartitions)
+
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, allPartitions)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, writablePartitions)
+
+	// len==0 catches when it's nil (no such topic) and the odd case when every single
+	// partition is undergoing leader election simultaneously. Callers have to be able to handle
+	// this function returning an empty slice (which is a valid return value) but catching it
+	// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+	// a metadata refresh as a nicety so callers can just try again and don't have to manually
+	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, writablePartitions)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Replicas), metadata.Err
+	}
+	return dupInt32Slice(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	if metadata.Err == ErrReplicaNotAvailable {
+		return dupInt32Slice(metadata.Isr), metadata.Err
+	}
+	return dupInt32Slice(metadata.Isr), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	leader, err := client.cachedLeader(topic, partitionID)
+
+	if leader == nil {
+		err = client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		leader, err = client.cachedLeader(topic, partitionID)
+	}
+
+	return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning
+	// a proper error. Handle that case here by returning an error instead of sending the
+	// request off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+	for _, topic := range topics {
+		if len(topic) == 0 {
+			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+		}
+	}
+
+	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, time)
+
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, time)
+	}
+
+	return offset, err
+}
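+
+// Illustrative sketch (assumes a client created as in the NewClient example;
+// the topic name is hypothetical): fetch the offset that will be assigned to
+// the next message produced to partition 0.
+//
+//	offset, err := client.GetOffset("my-topic", 0, OffsetNewest)
+//	if err == nil {
+//		Logger.Printf("next offset: %d", offset)
+//	}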
+
+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
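+
+// Illustrative sketch (group name is hypothetical): look up the coordinating
+// broker for a consumer group, assuming a client created as above.
+//
+//	broker, err := client.Coordinator("my-group")
+//	if err == nil {
+//		Logger.Printf("coordinator is broker #%d", broker.ID())
+//	}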
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID is already registered at a different address,
+// the stale entry is closed and replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker removes a broker from the seedBrokers list, and if it's
+// not a seed broker, removes it from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+		client.deadSeeds = append(client.deadSeeds, broker)
+		client.seedBrokers = client.seedBrokers[1:]
+	} else {
+		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+		// but we really shouldn't have to; once that loop is made better this case can be
+		// removed, and the function generally can be renamed from `deregisterBroker` to
+		// `nextSeedBroker` or something
+		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+		delete(client.brokers, broker.ID())
+	}
+}
+
+func (client *client) resurrectDeadBrokers() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+	client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	if len(client.seedBrokers) > 0 {
+		_ = client.seedBrokers[0].Open(client.conf)
+		return client.seedBrokers[0]
+	}
+
+	// not guaranteed to be random *or* deterministic
+	for _, broker := range client.brokers {
+		_ = broker.Open(client.conf)
+		return broker
+	}
+
+	return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+	allPartitions partitionType = iota
+	writablePartitions
+	// If you add any more types, update the partition cache in update()
+
+	// Ensure this is the last partition type value
+	maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		return partitions[partitionID]
+	}
+
+	return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions, exists := client.cachedPartitionsResults[topic]
+
+	if !exists {
+		return nil
+	}
+	return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+	partitions := client.metadata[topic]
+
+	if partitions == nil {
+		return nil
+	}
+
+	ret := make([]int32, 0, len(partitions))
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+			continue
+		}
+		ret = append(ret, partition.ID)
+	}
+
+	sort.Sort(int32Slice(ret))
+	return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		metadata, ok := partitions[partitionID]
+		if ok {
+			if metadata.Err == ErrLeaderNotAvailable {
+				return nil, ErrLeaderNotAvailable
+			}
+			b := client.brokers[metadata.Leader]
+			if b == nil {
+				return nil, ErrLeaderNotAvailable
+			}
+			_ = b.Open(client.conf)
+			return b, nil
+		}
+	}
+
+	return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+	broker, err := client.Leader(topic, partitionID)
+	if err != nil {
+		return -1, err
+	}
+
+	request := &OffsetRequest{}
+	if client.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 1
+	}
+	request.AddBlock(topic, partitionID, time, 1)
+
+	response, err := broker.GetAvailableOffsets(request)
+	if err != nil {
+		_ = broker.Close()
+		return -1, err
+	}
+
+	block := response.GetBlock(topic, partitionID)
+	if block == nil {
+		_ = broker.Close()
+		return -1, ErrIncompleteResponse
+	}
+	if block.Err != ErrNoError {
+		return -1, block.Err
+	}
+	if len(block.Offsets) != 1 {
+		return -1, ErrOffsetOutOfRange
+	}
+
+	return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+	defer close(client.closed)
+
+	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := client.refreshMetadata(); err != nil {
+				Logger.Println("Client background metadata update:", err)
+			}
+		case <-client.closer:
+			return
+		}
+	}
+}
+
+func (client *client) refreshMetadata() error {
+	topics := []string{}
+
+	if !client.conf.Metadata.Full {
+		if specificTopics, err := client.MetadataTopics(); err != nil {
+			return err
+		} else if len(specificTopics) == 0 {
+			return ErrNoTopicsToUpdateMetadata
+		} else {
+			topics = specificTopics
+		}
+	}
+
+	if err := client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+	retry := func(err error) error {
+		if attemptsRemaining > 0 {
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(client.conf.Metadata.Retry.Backoff)
+			return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+		}
+		return err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		if len(topics) > 0 {
+			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+		} else {
+			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+		}
+
+		req := &MetadataRequest{Topics: topics}
+		if client.conf.Version.IsAtLeast(V0_10_0_0) {
+			req.Version = 1
+		}
+		response, err := broker.GetMetadata(req)
+
+		switch err.(type) {
+		case nil:
+			allKnownMetaData := len(topics) == 0
+			// valid response, use it
+			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+			if shouldRetry {
+				Logger.Println("client/metadata found some partitions to be leaderless")
+				return retry(err) // note: err can be nil
+			}
+			return err
+
+		case PacketEncodingError:
+			// didn't even send, return the error
+			return err
+		default:
+			// some other error, remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+
+	Logger.Println("client/metadata no available broker to send metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
+
+// updateMetadata stores the given metadata response in the cache; if there was no fatal error,
+// it reports whether a retry is needed because some partitions returned ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	// For all the brokers we received:
+	// - if it is a new ID, save it
+	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - otherwise ignore it, replacing our existing one would just bounce the connection
+	for _, broker := range data.Brokers {
+		client.registerBroker(broker)
+	}
+
+	client.controllerID = data.ControllerID
+
+	if allKnownMetaData {
+		client.metadata = make(map[string]map[int32]*PartitionMetadata)
+		client.metadataTopics = make(map[string]none)
+		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+	}
+	for _, topic := range data.Topics {
+		// topics must first be added to `metadataTopics` to guarantee that all
+		// requested topics are recorded and remain trackable for the periodic
+		// metadata refresh.
+		if _, exists := client.metadataTopics[topic.Name]; !exists {
+			client.metadataTopics[topic.Name] = none{}
+		}
+		delete(client.metadata, topic.Name)
+		delete(client.cachedPartitionsResults, topic.Name)
+
+		switch topic.Err {
+		case ErrNoError:
+			break
+		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+			err = topic.Err
+			continue
+		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+			break
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if partition.Err == ErrLeaderNotAvailable {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedController() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers[client.controllerID]
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+	retry := func(err error) (*FindCoordinatorResponse, error) {
+		if attemptsRemaining > 0 {
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(client.conf.Metadata.Retry.Backoff)
+			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+		}
+		return nil, err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+		request := new(FindCoordinatorRequest)
+		request.CoordinatorKey = consumerGroup
+		request.CoordinatorType = CoordinatorGroup
+
+		response, err := broker.FindCoordinator(request)
+
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			switch err.(type) {
+			case PacketEncodingError:
+				return nil, err
+			default:
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+		}
+
+		switch response.Err {
+		case ErrNoError:
+			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+
+		case ErrConsumerCoordinatorNotAvailable:
+			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		default:
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
new file mode 100644
index 0000000..94b716e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/compress.go
@@ -0,0 +1,75 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"sync"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+var (
+	lz4WriterPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewWriter(nil)
+		},
+	}
+
+	gzipWriterPool = sync.Pool{
+		New: func() interface{} {
+			return gzip.NewWriter(nil)
+		},
+	}
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var (
+			err    error
+			buf    bytes.Buffer
+			writer *gzip.Writer
+		)
+		if level != CompressionLevelDefault {
+			writer, err = gzip.NewWriterLevel(&buf, level)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			writer = gzipWriterPool.Get().(*gzip.Writer)
+			defer gzipWriterPool.Put(writer)
+			writer.Reset(&buf)
+		}
+		if _, err := writer.Write(data); err != nil {
+			return nil, err
+		}
+		if err := writer.Close(); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	case CompressionSnappy:
+		return snappy.Encode(data), nil
+	case CompressionLZ4:
+		writer := lz4WriterPool.Get().(*lz4.Writer)
+		defer lz4WriterPool.Put(writer)
+
+		var buf bytes.Buffer
+		writer.Reset(&buf)
+
+		if _, err := writer.Write(data); err != nil {
+			return nil, err
+		}
+		if err := writer.Close(); err != nil {
+			return nil, err
+		}
+		return buf.Bytes(), nil
+	case CompressionZSTD:
+		return zstdCompressLevel(nil, data, level)
+	default:
+		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+	}
+}
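+
+// compress is internal to the package; callers normally select a codec and
+// level through the producer configuration instead. An illustrative sketch
+// (the chosen codec and level here are assumptions, not recommendations):
+//
+//	conf := NewConfig()
+//	conf.Producer.Compression = CompressionGZIP
+//	conf.Producer.CompressionLevel = gzip.BestSpeed // any valid gzip level works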
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000..ed20522
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,581 @@
+package sarama
+
+import (
+	"compress/gzip"
+	"crypto/tls"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"regexp"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+	Admin struct {
+		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+		Timeout time.Duration
+	}
+
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker. While there are multiple SASL authentication methods
+		// the current implementation is limited to plaintext (SASL/PLAIN) authentication
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// username and password for SASL/PLAIN authentication
+			User     string
+			Password string
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection.
+		// If zero, keep-alives are disabled. (default is 0: disabled).
+		KeepAlive time.Duration
+
+		// LocalAddr is the local address to use when dialing an
+		// address. The address must be of a compatible type for the
+		// network being dialed.
+		// If nil, a local address is automatically chosen.
+		LocalAddr net.Addr
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1000000). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number
+		// of RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// The level of compression to use on messages. The meaning depends
+		// on the actual compression type used and defaults to default compression
+		// level for the codec.
+		CompressionLevel int
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+		// If enabled, the producer will ensure that exactly one copy of each message is
+		// written.
+		Idempotent bool
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+		}
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	Consumer struct {
+
+		// Group is the namespace for configuring consumer group.
+		Group struct {
+			Session struct {
+				// The timeout used to detect consumer failures when using Kafka's group management facility.
+				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
+				// If no heartbeats are received by the broker before the expiration of this session timeout,
+				// then the broker will remove this consumer from the group and initiate a rebalance.
+				// Note that the value must be in the allowable range as configured in the broker configuration
+				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+				Timeout time.Duration
+			}
+			Heartbeat struct {
+				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
+				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+				// to facilitate rebalancing when new consumers join or leave the group.
+				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+				// higher than 1/3 of that value.
+				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+				Interval time.Duration
+			}
+			Rebalance struct {
+				// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
+				Strategy BalanceStrategy
+				// The maximum allowed time for each worker to join the group once a rebalance has begun.
+				// This is basically a limit on the amount of time needed for all tasks to flush any pending
+				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+				// the group, which will cause offset commit failures (default 60s).
+				Timeout time.Duration
+
+				Retry struct {
+					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+					// the load to assign partitions to each consumer. If the set of consumers changes while
+					// this assignment is taking place the rebalance will fail and retry. This setting controls
+					// the maximum number of attempts before giving up (default 4).
+					Max int
+					// Backoff time between retries during rebalance (default 2s)
+					Backoff time.Duration
+				}
+			}
+			Member struct {
+				// Custom metadata to include when joining the group. The user data for all joined members
+				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+				// coordinator for the group.
+				UserData []byte
+			}
+		}
+
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyways. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.wait.max.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			// How frequently to commit updated offsets. Defaults to 1s.
+			CommitInterval time.Duration
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used).  Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+			// (default is 0: disabled).
+			Retention time.Duration
+
+			Retry struct {
+				// The total number of times to retry failing commit
+				// requests during OffsetManager shutdown (default 3).
+				Max int
+			}
+		}
+	}
+
+	// A user-provided string sent with every request to the brokers for logging,
+	// debugging, and auditing purposes. Defaults to "sarama", but you should
+	// probably set it to something specific to your application.
+	ClientID string
+	// The number of events to buffer in internal and external channels. This
+	// permits the producer and consumer to continue processing some messages
+	// in the background while user code is working, greatly improving throughput.
+	// Defaults to 256.
+	ChannelBufferSize int
+	// The version of Kafka that Sarama will assume it is running against.
+	// Defaults to the oldest supported stable version. Since Kafka provides
+	// backwards-compatibility, setting it to a version older than you have
+	// will not break anything, although it may prevent you from using the
+	// latest features. Setting it to a version greater than you are actually
+	// running may lead to random breakage.
+	Version KafkaVersion
+	// The registry to define metrics into.
+	// Defaults to a local registry.
+	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+	// prior to starting Sarama.
+	// See Examples on how to use the metrics registry
+	MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{}
+
+	c.Admin.Timeout = 3 * time.Second
+
+	c.Net.MaxOpenRequests = 5
+	c.Net.DialTimeout = 30 * time.Second
+	c.Net.ReadTimeout = 30 * time.Second
+	c.Net.WriteTimeout = 30 * time.Second
+	c.Net.SASL.Handshake = true
+
+	c.Metadata.Retry.Max = 3
+	c.Metadata.Retry.Backoff = 250 * time.Millisecond
+	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
+
+	c.Producer.MaxMessageBytes = 1000000
+	c.Producer.RequiredAcks = WaitForLocal
+	c.Producer.Timeout = 10 * time.Second
+	c.Producer.Partitioner = NewHashPartitioner
+	c.Producer.Retry.Max = 3
+	c.Producer.Retry.Backoff = 100 * time.Millisecond
+	c.Producer.Return.Errors = true
+	c.Producer.CompressionLevel = CompressionLevelDefault
+
+	c.Consumer.Fetch.Min = 1
+	c.Consumer.Fetch.Default = 1024 * 1024
+	c.Consumer.Retry.Backoff = 2 * time.Second
+	c.Consumer.MaxWaitTime = 250 * time.Millisecond
+	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+	c.Consumer.Return.Errors = false
+	c.Consumer.Offsets.CommitInterval = 1 * time.Second
+	c.Consumer.Offsets.Initial = OffsetNewest
+	c.Consumer.Offsets.Retry.Max = 3
+
+	c.Consumer.Group.Session.Timeout = 10 * time.Second
+	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
+	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+	c.Consumer.Group.Rebalance.Retry.Max = 4
+	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+
+	c.ClientID = defaultClientID
+	c.ChannelBufferSize = 256
+	c.Version = MinVersion
+	c.MetricRegistry = metrics.NewRegistry()
+
+	return c
+}
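+
+// An illustrative sketch of overriding the defaults before use (the values
+// below are hypothetical, not recommendations):
+//
+//	conf := NewConfig()
+//	conf.Version = V0_10_0_0 // enable 0.10-era request versions
+//	conf.Metadata.RefreshFrequency = 5 * time.Minute
+//	conf.Producer.Return.Successes = true // required when used with a SyncProducer
+//	if err := conf.Validate(); err != nil {
+//		panic(err)
+//	}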
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+	// some configuration values should be warned on but not fail completely, do those first
+	if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil {
+		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+	}
+	if c.Net.SASL.Enable == false {
+		if c.Net.SASL.User != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+		}
+		if c.Net.SASL.Password != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+		}
+	}
+	if c.Producer.RequiredAcks > 1 {
+		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+	}
+	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
+	if c.Producer.Timeout%time.Millisecond != 0 {
+		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+	}
+	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+	}
+	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.ClientID == defaultClientID {
+		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+	}
+
+	// validate Net values
+	switch {
+	case c.Net.MaxOpenRequests <= 0:
+		return ConfigurationError("Net.MaxOpenRequests must be > 0")
+	case c.Net.DialTimeout <= 0:
+		return ConfigurationError("Net.DialTimeout must be > 0")
+	case c.Net.ReadTimeout <= 0:
+		return ConfigurationError("Net.ReadTimeout must be > 0")
+	case c.Net.WriteTimeout <= 0:
+		return ConfigurationError("Net.WriteTimeout must be > 0")
+	case c.Net.KeepAlive < 0:
+		return ConfigurationError("Net.KeepAlive must be >= 0")
+	case c.Net.SASL.Enable == true && c.Net.SASL.User == "":
+		return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+	case c.Net.SASL.Enable == true && c.Net.SASL.Password == "":
+		return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+	}
+
+	// validate the Admin values
+	switch {
+	case c.Admin.Timeout <= 0:
+		return ConfigurationError("Admin.Timeout must be > 0")
+	}
+
+	// validate the Metadata values
+	switch {
+	case c.Metadata.Retry.Max < 0:
+		return ConfigurationError("Metadata.Retry.Max must be >= 0")
+	case c.Metadata.Retry.Backoff < 0:
+		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+	case c.Metadata.RefreshFrequency < 0:
+		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+	}
+
+	// validate the Producer values
+	switch {
+	case c.Producer.MaxMessageBytes <= 0:
+		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+	case c.Producer.RequiredAcks < -1:
+		return ConfigurationError("Producer.RequiredAcks must be >= -1")
+	case c.Producer.Timeout <= 0:
+		return ConfigurationError("Producer.Timeout must be > 0")
+	case c.Producer.Partitioner == nil:
+		return ConfigurationError("Producer.Partitioner must not be nil")
+	case c.Producer.Flush.Bytes < 0:
+		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+	case c.Producer.Flush.Messages < 0:
+		return ConfigurationError("Producer.Flush.Messages must be >= 0")
+	case c.Producer.Flush.Frequency < 0:
+		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+	case c.Producer.Flush.MaxMessages < 0:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+	case c.Producer.Retry.Max < 0:
+		return ConfigurationError("Producer.Retry.Max must be >= 0")
+	case c.Producer.Retry.Backoff < 0:
+		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+	}
+
+	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+	}
+
+	if c.Producer.Compression == CompressionGZIP {
+		if c.Producer.CompressionLevel != CompressionLevelDefault {
+			if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
+				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+			}
+		}
+	}
+
+	if c.Producer.Idempotent {
+		if !c.Version.IsAtLeast(V0_11_0_0) {
+			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
+		}
+		if c.Producer.Retry.Max == 0 {
+			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
+		}
+		if c.Producer.RequiredAcks != WaitForAll {
+			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
+		}
+		if c.Net.MaxOpenRequests > 1 {
+			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
+		}
+	}
+
+	// validate the Consumer values
+	switch {
+	case c.Consumer.Fetch.Min <= 0:
+		return ConfigurationError("Consumer.Fetch.Min must be > 0")
+	case c.Consumer.Fetch.Default <= 0:
+		return ConfigurationError("Consumer.Fetch.Default must be > 0")
+	case c.Consumer.Fetch.Max < 0:
+		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+	case c.Consumer.MaxProcessingTime <= 0:
+		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+	case c.Consumer.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+	case c.Consumer.Offsets.CommitInterval <= 0:
+		return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+	case c.Consumer.Offsets.Retry.Max < 0:
+		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+	}
+
+	// validate the Consumer Group values
+	switch {
+	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
+	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
+	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
+	case c.Consumer.Group.Rebalance.Strategy == nil:
+		return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
+	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
+		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
+	case c.Consumer.Group.Rebalance.Retry.Max < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
+	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
+	}
+
+	// validate misc shared values
+	switch {
+	case c.ChannelBufferSize < 0:
+		return ConfigurationError("ChannelBufferSize must be >= 0")
+	case !validID.MatchString(c.ClientID):
+		return ConfigurationError("ClientID is invalid")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/Shopify/sarama/config_resource_type.go
new file mode 100644
index 0000000..848cc9c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config_resource_type.go
@@ -0,0 +1,15 @@
+package sarama
+
+type ConfigResourceType int8
+
+// Taken from:
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs#KIP-133:DescribeandAlterConfigsAdminAPIs-WireFormattypes
+
+const (
+	UnknownResource ConfigResourceType = 0
+	AnyResource     ConfigResourceType = 1
+	TopicResource   ConfigResourceType = 2
+	GroupResource   ConfigResourceType = 3
+	ClusterResource ConfigResourceType = 4
+	BrokerResource  ConfigResourceType = 5
+)
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 0000000..33d9d14
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,807 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+	Key, Value     []byte
+	Topic          string
+	Partition      int32
+	Offset         int64
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func (ce ConsumerError) Error() string {
+	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+}
+
+type consumer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	lock            sync.Mutex
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	c.(*consumer).ownClient = true
+	return c, nil
+}
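+
+// Illustrative sketch (broker address and topic are hypothetical): create a
+// consumer and read a single partition starting from the newest offset.
+//
+//	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	for msg := range pc.Messages() {
+//		Logger.Printf("offset %d: %s", msg.Offset, string(msg.Value))
+//	}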
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	c := &consumer{
+		client:          client,
+		conf:            client.Config(),
+		children:        make(map[string]map[int32]*partitionConsumer),
+		brokerConsumers: make(map[*Broker]*brokerConsumer),
+	}
+
+	return c, nil
+}
+
+func (c *consumer) Close() error {
+	if c.ownClient {
+		return c.client.Close()
+	}
+	return nil
+}
+
+func (c *consumer) Topics() ([]string, error) {
+	return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+	return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+	child := &partitionConsumer{
+		consumer:  c,
+		conf:      c.conf,
+		topic:     topic,
+		partition: partition,
+		messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+		errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
+		feeder:    make(chan *FetchResponse, 1),
+		trigger:   make(chan none, 1),
+		dying:     make(chan none),
+		fetchSize: c.conf.Consumer.Fetch.Default,
+	}
+
+	if err := child.chooseStartingOffset(offset); err != nil {
+		return nil, err
+	}
+
+	var leader *Broker
+	var err error
+	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+		return nil, err
+	}
+
+	if err := c.addChild(child); err != nil {
+		return nil, err
+	}
+
+	go withRecover(child.dispatcher)
+	go withRecover(child.responseFeeder)
+
+	child.broker = c.refBrokerConsumer(leader)
+	child.broker.input <- child
+
+	return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	hwms := make(map[string]map[int32]int64)
+	for topic, p := range c.children {
+		hwm := make(map[int32]int64, len(p))
+		for partition, pc := range p {
+			hwm[partition] = pc.HighWaterMarkOffset()
+		}
+		hwms[topic] = hwm
+	}
+
+	return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	topicChildren := c.children[child.topic]
+	if topicChildren == nil {
+		topicChildren = make(map[int32]*partitionConsumer)
+		c.children[child.topic] = topicChildren
+	}
+
+	if topicChildren[child.partition] != nil {
+		return ConfigurationError("That topic/partition is already being consumed")
+	}
+
+	topicChildren[child.partition] = child
+	return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	bc := c.brokerConsumers[broker]
+	if bc == nil {
+		bc = c.newBrokerConsumer(broker)
+		c.brokerConsumers[broker] = bc
+	}
+
+	bc.refs++
+
+	return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	brokerWorker.refs--
+
+	if brokerWorker.refs == 0 {
+		close(brokerWorker.input)
+		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+			delete(c.brokerConsumers, brokerWorker.broker)
+		}
+	}
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
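+//
+// A minimal sketch of the for/range pattern described above (the `consumer`
+// value and topic name are illustrative, not part of this package):
+//
+//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer pc.Close()
+//	for msg := range pc.Messages() {
+//		fmt.Printf("offset %d: %s\n", msg.Offset, msg.Value)
+//	}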
+type PartitionConsumer interface {
+
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+}
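+
+// Consumer-lag sketch (hedged, reusing the illustrative names from the sketch
+// above): since HighWaterMarkOffset is the offset of the next message to be
+// produced, the backlog remaining after consuming msg is:
+//
+//	lag := pc.HighWaterMarkOffset() - msg.Offset - 1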
+
+type partitionConsumer struct {
+	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	consumer            *consumer
+	conf                *Config
+	topic               string
+	partition           int32
+
+	broker   *brokerConsumer
+	messages chan *ConsumerMessage
+	errors   chan *ConsumerError
+	feeder   chan *FetchResponse
+
+	trigger, dying chan none
+	responseResult error
+	closeOnce      sync.Once
+
+	fetchSize int32
+	offset    int64
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+	cErr := &ConsumerError{
+		Topic:     child.topic,
+		Partition: child.partition,
+		Err:       err,
+	}
+
+	if child.conf.Consumer.Return.Errors {
+		child.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (child *partitionConsumer) dispatcher() {
+	for range child.trigger {
+		select {
+		case <-child.dying:
+			close(child.trigger)
+		case <-time.After(child.conf.Consumer.Retry.Backoff):
+			if child.broker != nil {
+				child.consumer.unrefBrokerConsumer(child.broker)
+				child.broker = nil
+			}
+
+			Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+			if err := child.dispatch(); err != nil {
+				child.sendError(err)
+				child.trigger <- none{}
+			}
+		}
+	}
+
+	if child.broker != nil {
+		child.consumer.unrefBrokerConsumer(child.broker)
+	}
+	child.consumer.removeChild(child)
+	close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+		return err
+	}
+
+	var leader *Broker
+	var err error
+	if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+		return err
+	}
+
+	child.broker = child.consumer.refBrokerConsumer(leader)
+
+	child.broker.input <- child
+
+	return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+	if err != nil {
+		return err
+	}
+	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case offset == OffsetNewest:
+		child.offset = newestOffset
+	case offset == OffsetOldest:
+		child.offset = oldestOffset
+	case offset >= oldestOffset && offset <= newestOffset:
+		child.offset = offset
+	default:
+		return ErrOffsetOutOfRange
+	}
+
+	return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+	return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+	return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
+	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
+	// also just close itself)
+	child.closeOnce.Do(func() {
+		close(child.dying)
+	})
+}
+
+func (child *partitionConsumer) Close() error {
+	child.AsyncClose()
+
+	go withRecover(func() {
+		for range child.messages {
+			// drain
+		}
+	})
+
+	var errors ConsumerErrors
+	for err := range child.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+	return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+	var msgs []*ConsumerMessage
+	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+	firstAttempt := true
+
+feederLoop:
+	for response := range child.feeder {
+		msgs, child.responseResult = child.parseResponse(response)
+
+		for i, msg := range msgs {
+		messageSelect:
+			select {
+			case child.messages <- msg:
+				firstAttempt = true
+			case <-expiryTicker.C:
+				if !firstAttempt {
+					child.responseResult = errTimedOut
+					child.broker.acks.Done()
+					for _, msg = range msgs[i:] {
+						child.messages <- msg
+					}
+					child.broker.input <- child
+					continue feederLoop
+				} else {
+					// current message has not been sent, return to select
+					// statement
+					firstAttempt = false
+					goto messageSelect
+				}
+			}
+		}
+
+		child.broker.acks.Done()
+	}
+
+	expiryTicker.Stop()
+	close(child.messages)
+	close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	for _, msgBlock := range msgSet.Messages {
+		for _, msg := range msgBlock.Messages() {
+			offset := msg.Offset
+			if msg.Msg.Version >= 1 {
+				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+				offset += baseOffset
+			}
+			if offset < child.offset {
+				continue
+			}
+			messages = append(messages, &ConsumerMessage{
+				Topic:          child.topic,
+				Partition:      child.partition,
+				Key:            msg.Msg.Key,
+				Value:          msg.Msg.Value,
+				Offset:         offset,
+				Timestamp:      msg.Msg.Timestamp,
+				BlockTimestamp: msgBlock.Msg.Timestamp,
+			})
+			child.offset = offset + 1
+		}
+	}
+	if len(messages) == 0 {
+		return nil, ErrIncompleteResponse
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	for _, rec := range batch.Records {
+		offset := batch.FirstOffset + rec.OffsetDelta
+		if offset < child.offset {
+			continue
+		}
+		messages = append(messages, &ConsumerMessage{
+			Topic:     child.topic,
+			Partition: child.partition,
+			Key:       rec.Key,
+			Value:     rec.Value,
+			Offset:    offset,
+			Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
+			Headers:   rec.Headers,
+		})
+		child.offset = offset + 1
+	}
+	if len(messages) == 0 {
+		child.offset += 1
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if block.Err != ErrNoError {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.numRecords()
+	if err != nil {
+		return nil, err
+	}
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a trailing one then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+	messages := []*ConsumerMessage{}
+	for _, records := range block.RecordsSet {
+		switch records.recordsType {
+		case legacyRecords:
+			messageSetMessages, err := child.parseMessages(records.MsgSet)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, messageSetMessages...)
+		case defaultRecords:
+			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+			if err != nil {
+				return nil, err
+			}
+			if control, err := records.isControl(); err != nil || control {
+				continue
+			}
+
+			messages = append(messages, recordBatchMessages...)
+		default:
+			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+		}
+	}
+
+	return messages, nil
+}
+
+// brokerConsumer
+
+type brokerConsumer struct {
+	consumer         *consumer
+	broker           *Broker
+	input            chan *partitionConsumer
+	newSubscriptions chan []*partitionConsumer
+	wait             chan none
+	subscriptions    map[*partitionConsumer]none
+	acks             sync.WaitGroup
+	refs             int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+	bc := &brokerConsumer{
+		consumer:         c,
+		broker:           broker,
+		input:            make(chan *partitionConsumer),
+		newSubscriptions: make(chan []*partitionConsumer),
+		wait:             make(chan none),
+		subscriptions:    make(map[*partitionConsumer]none),
+		refs:             0,
+	}
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+	var buffer []*partitionConsumer
+
+	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+	// so the main goroutine can block waiting for work if it has none.
+	for {
+		if len(buffer) > 0 {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- buffer:
+				buffer = nil
+			case bc.wait <- none{}:
+			}
+		} else {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- nil:
+			}
+		}
+	}
+
+done:
+	close(bc.wait)
+	if len(buffer) > 0 {
+		bc.newSubscriptions <- buffer
+	}
+	close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+	<-bc.wait // wait for our first piece of work
+
+	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Either way, the signal just hasn't propagated to our goroutine yet.
+			<-bc.wait
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			break
+		}
+	}
+}
+
+func (bc *brokerConsumer) handleResponses() {
+	// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		switch result {
+		case nil:
+			break
+		case errTimedOut:
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again,
+			// so shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+			// not an error, but does need redispatching
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		default:
+			// unknown error; tell the user and try redispatching
+			child.sendError(result)
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		}
+	}
+}
+
+func (bc *brokerConsumer) abort(err error) {
+	bc.consumer.abandonBrokerConsumer(bc)
+	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+	for child := range bc.subscriptions {
+		child.sendError(err)
+		child.trigger <- none{}
+	}
+
+	for newSubscriptions := range bc.newSubscriptions {
+		if len(newSubscriptions) == 0 {
+			<-bc.wait
+			continue
+		}
+		for _, child := range newSubscriptions {
+			child.sendError(err)
+			child.trigger <- none{}
+		}
+	}
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+	request := &FetchRequest{
+		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
+		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 2
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 4
+		request.Isolation = ReadUncommitted // we don't yet support transactions
+	}
+
+	for child := range bc.subscriptions {
+		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+	}
+
+	return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
new file mode 100644
index 0000000..bb6a2c2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group.go
@@ -0,0 +1,774 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+	// Consume joins a cluster of consumers for a given list of topics and
+	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+	//
+	// The life-cycle of a session is represented by the following steps:
+	//
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	//    and are assigned their "fair share" of partitions, aka 'claims'.
+	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
+	//    of the claims and allow any necessary preparation or alteration of state.
+	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+	//    in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
+	//    from concurrent reads/writes.
+	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+	//    parent context is cancelled or when a server-side rebalance cycle is initiated.
+	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+	//    to allow the user to perform any final tasks before a rebalance.
+	// 6. Finally, marked offsets are committed one last time before claims are released.
+	//
+	// Please note that once a rebalance is triggered, sessions must be completed within
+	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+	// commit failures. A usage sketch follows this interface definition.
+	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+	// By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan error
+
+	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+	// this function before the object passes out of scope, as it will otherwise leak memory.
+	Close() error
+}
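+
+// A hedged usage sketch (handler implements ConsumerGroupHandler; `ctx`, the
+// broker address, and the names are illustrative). Consume is called in a
+// loop because each call corresponds to a single session, which ends when a
+// rebalance cycle starts:
+//
+//	group, err := NewConsumerGroup([]string{"localhost:9092"}, "example-group", config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer group.Close()
+//	for {
+//		if err := group.Consume(ctx, []string{"example-topic"}, handler); err != nil {
+//			break
+//		}
+//		if ctx.Err() != nil {
+//			return
+//		}
+//	}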
+
+type consumerGroup struct {
+	client    Client
+	ownClient bool
+
+	config   *Config
+	consumer Consumer
+	groupID  string
+	memberID string
+	errors   chan error
+
+	lock      sync.Mutex
+	closed    chan none
+	closeOnce sync.Once
+}
+
+// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := NewConsumerGroupFromClient(groupID, client)
+	if err != nil {
+		_ = client.Close()
+		return nil, err
+	}
+
+	c.(*consumerGroup).ownClient = true
+	return c, nil
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	config := client.Config()
+	if !config.Version.IsAtLeast(V0_10_2_0) {
+		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+	}
+
+	consumer, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+
+	return &consumerGroup{
+		client:   client,
+		consumer: consumer,
+		config:   config,
+		groupID:  groupID,
+		errors:   make(chan error, config.ChannelBufferSize),
+		closed:   make(chan none),
+	}, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+
+		c.lock.Lock()
+		defer c.lock.Unlock()
+
+		// leave group
+		if e := c.leave(); e != nil {
+			err = e
+		}
+
+		// drain errors
+		go func() {
+			close(c.errors)
+		}()
+		for e := range c.errors {
+			err = e
+		}
+
+		if c.ownClient {
+			if e := c.client.Close(); e != nil {
+				err = e
+			}
+		}
+	})
+	return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+	// Ensure group is not closed
+	select {
+	case <-c.closed:
+		return ErrClosedConsumerGroup
+	default:
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Quick exit when no topics are provided
+	if len(topics) == 0 {
+		return fmt.Errorf("no topics provided")
+	}
+
+	// Refresh metadata for requested topics
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	// Get coordinator
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		return err
+	}
+
+	// Init session
+	sess, err := c.newSession(ctx, coordinator, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	if err == ErrClosedClient {
+		return ErrClosedConsumerGroup
+	} else if err != nil {
+		return err
+	}
+
+	// Wait for session exit signal
+	<-sess.ctx.Done()
+
+	// Gracefully release session claims
+	return sess.release(true)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, coordinator *Broker, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	// Join consumer group
+	join, err := c.joinGroupRequest(coordinator, topics)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch join.Err {
+	case ErrNoError:
+		c.memberID = join.MemberId
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, coordinator, topics, handler, retries)
+	case ErrRebalanceInProgress: // retry after backoff
+		if retries <= 0 {
+			return nil, join.Err
+		}
+
+		select {
+		case <-c.closed:
+			return nil, ErrClosedConsumerGroup
+		case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+		}
+
+		return c.newSession(ctx, coordinator, topics, handler, retries-1)
+	default:
+		return nil, join.Err
+	}
+
+	// Prepare distribution plan if we joined as the leader
+	var plan BalanceStrategyPlan
+	if join.LeaderId == join.MemberId {
+		members, err := join.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		plan, err = c.balance(members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Sync consumer group
+	sync, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
+	if err != nil {
+		_ = coordinator.Close()
+		return nil, err
+	}
+	switch sync.Err {
+	case ErrNoError:
+	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, coordinator, topics, handler, retries)
+	case ErrRebalanceInProgress: // retry after backoff
+		if retries <= 0 {
+			return nil, sync.Err
+		}
+
+		select {
+		case <-c.closed:
+			return nil, ErrClosedConsumerGroup
+		case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+		}
+
+		return c.newSession(ctx, coordinator, topics, handler, retries-1)
+	default:
+		return nil, sync.Err
+	}
+
+	// Retrieve and sort claims
+	var claims map[string][]int32
+	if len(sync.MemberAssignment) > 0 {
+		members, err := sync.GetMemberAssignment()
+		if err != nil {
+			return nil, err
+		}
+		claims = members.Topics
+
+		for _, partitions := range claims {
+			sort.Sort(int32Slice(partitions))
+		}
+	}
+
+	return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
+}
+
+func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
+	req := &JoinGroupRequest{
+		GroupId:        c.groupID,
+		MemberId:       c.memberID,
+		SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
+		ProtocolType:   "consumer",
+	}
+	if c.config.Version.IsAtLeast(V0_10_1_0) {
+		req.Version = 1
+		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
+	}
+
+	meta := &ConsumerGroupMemberMetadata{
+		Topics:   topics,
+		UserData: c.config.Consumer.Group.Member.UserData,
+	}
+	strategy := c.config.Consumer.Group.Rebalance.Strategy
+	if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+		return nil, err
+	}
+
+	return coordinator.JoinGroup(req)
+}
+
+func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
+	req := &SyncGroupRequest{
+		GroupId:      c.groupID,
+		MemberId:     c.memberID,
+		GenerationId: generationID,
+	}
+	for memberID, topics := range plan {
+		err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{
+			Topics: topics,
+		})
+		if err != nil {
+			return nil, err
+		}
+	}
+	return coordinator.SyncGroup(req)
+}
+
+func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
+	req := &HeartbeatRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	}
+
+	return coordinator.Heartbeat(req)
+}
+
+func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
+	topics := make(map[string][]int32)
+	for _, meta := range members {
+		for _, topic := range meta.Topics {
+			topics[topic] = nil
+		}
+	}
+
+	for topic := range topics {
+		partitions, err := c.client.Partitions(topic)
+		if err != nil {
+			return nil, err
+		}
+		topics[topic] = partitions
+	}
+
+	strategy := c.config.Consumer.Group.Rebalance.Strategy
+	return strategy.Plan(members, topics)
+}
+
+// Leaves the cluster, called by Close, protected by lock.
+func (c *consumerGroup) leave() error {
+	if c.memberID == "" {
+		return nil
+	}
+
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		return err
+	}
+
+	resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: c.memberID,
+	})
+	if err != nil {
+		_ = coordinator.Close()
+		return err
+	}
+
+	// Unset memberID
+	c.memberID = ""
+
+	// Check response
+	switch resp.Err {
+	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+		return nil
+	default:
+		return resp.Err
+	}
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+	select {
+	case <-c.closed:
+		return
+	default:
+	}
+
+	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
+		err = &ConsumerError{
+			Topic:     topic,
+			Partition: partition,
+			Err:       err,
+		}
+	}
+
+	if c.config.Consumer.Return.Errors {
+		select {
+		case c.errors <- err:
+		default:
+		}
+	} else {
+		Logger.Println(err)
+	}
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+	// Claims returns information about the claimed partitions by topic.
+	Claims() map[string][]int32
+
+	// MemberID returns the cluster member ID.
+	MemberID() string
+
+	// GenerationID returns the current generation ID.
+	GenerationID() int32
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+	// MarkMessage marks a message as consumed.
+	MarkMessage(msg *ConsumerMessage, metadata string)
+
+	// Context returns the session context.
+	Context() context.Context
+}
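+
+// Offset-marking sketch, following the convention described in MarkOffset
+// (mark the offset of the next message to read, i.e. last consumed + 1):
+//
+//	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
+//	sess.MarkMessage(msg, "") // equivalent shorthand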
+
+type consumerGroupSession struct {
+	parent       *consumerGroup
+	memberID     string
+	generationID int32
+	handler      ConsumerGroupHandler
+
+	claims  map[string][]int32
+	offsets *offsetManager
+	ctx     context.Context
+	cancel  func()
+
+	waitGroup       sync.WaitGroup
+	releaseOnce     sync.Once
+	hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+	// init offset manager
+	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
+	if err != nil {
+		return nil, err
+	}
+
+	// init context
+	ctx, cancel := context.WithCancel(ctx)
+
+	// init session
+	sess := &consumerGroupSession{
+		parent:       parent,
+		memberID:     memberID,
+		generationID: generationID,
+		handler:      handler,
+		offsets:      offsets,
+		claims:       claims,
+		ctx:          ctx,
+		cancel:       cancel,
+		hbDying:      make(chan none),
+		hbDead:       make(chan none),
+	}
+
+	// start heartbeat loop
+	go sess.heartbeatLoop()
+
+	// create a POM for each claim
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			pom, err := offsets.ManagePartition(topic, partition)
+			if err != nil {
+				_ = sess.release(false)
+				return nil, err
+			}
+
+			// handle POM errors
+			go func(topic string, partition int32) {
+				for err := range pom.Errors() {
+					sess.parent.handleError(err, topic, partition)
+				}
+			}(topic, partition)
+		}
+	}
+
+	// perform setup
+	if err := handler.Setup(sess); err != nil {
+		_ = sess.release(true)
+		return nil, err
+	}
+
+	// start consuming
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			sess.waitGroup.Add(1)
+
+			go func(topic string, partition int32) {
+				defer sess.waitGroup.Done()
+
+				// cancel the session as soon as the first
+				// goroutine exits
+				defer sess.cancel()
+
+				// consume a single topic/partition, blocking
+				sess.consume(topic, partition)
+			}(topic, partition)
+		}
+	}
+	return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string           { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.MarkOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.ResetOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
+	s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
+}
+
+func (s *consumerGroupSession) Context() context.Context {
+	return s.ctx
+}
+
+func (s *consumerGroupSession) consume(topic string, partition int32) {
+	// quick exit if rebalance is due
+	select {
+	case <-s.ctx.Done():
+		return
+	case <-s.parent.closed:
+		return
+	default:
+	}
+
+	// get next offset
+	offset := s.parent.config.Consumer.Offsets.Initial
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		offset, _ = pom.NextOffset()
+	}
+
+	// create new claim
+	claim, err := newConsumerGroupClaim(s, topic, partition, offset)
+	if err != nil {
+		s.parent.handleError(err, topic, partition)
+		return
+	}
+
+	// handle errors
+	go func() {
+		for err := range claim.Errors() {
+			s.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	// trigger close when session is done
+	go func() {
+		select {
+		case <-s.ctx.Done():
+		case <-s.parent.closed:
+		}
+		claim.AsyncClose()
+	}()
+
+	// start processing
+	if err := s.handler.ConsumeClaim(s, claim); err != nil {
+		s.parent.handleError(err, topic, partition)
+	}
+
+	// ensure consumer is closed & drained
+	claim.AsyncClose()
+	for _, err := range claim.waitClosed() {
+		s.parent.handleError(err, topic, partition)
+	}
+}
+
+func (s *consumerGroupSession) release(withCleanup bool) (err error) {
+	// signal release, stop heartbeat
+	s.cancel()
+
+	// wait for consumers to exit
+	s.waitGroup.Wait()
+
+	// perform release
+	s.releaseOnce.Do(func() {
+		if withCleanup {
+			if e := s.handler.Cleanup(s); e != nil {
+				s.parent.handleError(err, "", -1)
+				err = e
+			}
+		}
+
+		if e := s.offsets.Close(); e != nil {
+			err = e
+		}
+
+		close(s.hbDying)
+		<-s.hbDead
+	})
+
+	return
+}
+
+func (s *consumerGroupSession) heartbeatLoop() {
+	defer close(s.hbDead)
+	defer s.cancel() // trigger the end of the session on exit
+
+	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
+	defer pause.Stop()
+
+	retries := s.parent.config.Metadata.Retry.Max
+	for {
+		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
+		if err != nil {
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+
+			select {
+			case <-s.hbDying:
+				return
+			case <-time.After(s.parent.config.Metadata.Retry.Backoff):
+				retries--
+			}
+			continue
+		}
+
+		resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
+		if err != nil {
+			_ = coordinator.Close()
+			retries--
+			continue
+		}
+
+		switch resp.Err {
+		case ErrNoError:
+			retries = s.parent.config.Metadata.Retry.Max
+		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
+			return
+		default:
+			s.parent.handleError(err, "", -1)
+			return
+		}
+
+		select {
+		case <-pause.C:
+		case <-s.hbDying:
+			return
+		}
+	}
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
+// It also provides hooks for your consumer group session life-cycle and allows you to
+// trigger logic before or after the consume loop(s).
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
+// ensure that all state is safely protected against race conditions. A minimal example
+// handler follows this interface definition.
+type ConsumerGroupHandler interface {
+	// Setup is run at the beginning of a new session, before ConsumeClaim.
+	Setup(ConsumerGroupSession) error
+
+	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+	// but before the offsets are committed for the very last time.
+	Cleanup(ConsumerGroupSession) error
+
+	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+	// Once the Messages() channel is closed, the Handler must finish its processing
+	// loop and exit.
+	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
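+
+// A minimal example handler (illustrative only; real handlers should protect
+// any shared state, since ConsumeClaim runs in one goroutine per claim):
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(ConsumerGroupSession) error { return nil }
+//	func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
+//		for msg := range claim.Messages() {
+//			// process msg, then mark it consumed
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}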
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+	// Topic returns the consumed topic name.
+	Topic() string
+
+	// Partition returns the consumed partition.
+	Partition() int32
+
+	// InitialOffset returns the initial offset that was used as a starting point for this claim.
+	InitialOffset() int64
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker. The messages channel will be closed when a new rebalance cycle
+	// is due. You must finish processing and mark offsets within
+	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+	// re-assigned to another group member.
+	Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+	topic     string
+	partition int32
+	offset    int64
+	PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	if err == ErrOffsetOutOfRange {
+		offset = sess.parent.config.Consumer.Offsets.Initial
+		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for err := range pcm.Errors() {
+			sess.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	return &consumerGroupClaim{
+		topic:             topic,
+		partition:         partition,
+		offset:            offset,
+		PartitionConsumer: pcm,
+	}, nil
+}
+
+func (c *consumerGroupClaim) Topic() string        { return c.topic }
+func (c *consumerGroupClaim) Partition() int32     { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// Drains messages and errors, ensures the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+	go func() {
+		for range c.Messages() {
+		}
+	}()
+
+	for err := range c.Errors() {
+		errs = append(errs, err)
+	}
+	return
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 0000000..2d02cc3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,96 @@
+package sarama
+
+//ConsumerGroupMemberMetadata holds the metadata for a consumer group member
+type ConsumerGroupMemberMetadata struct {
+	Version  int16
+	Topics   []string
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putStringArray(m.Topics); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	if m.Topics, err = pd.getStringArray(); err != nil {
+		return
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+//ConsumerGroupMemberAssignment holds the member assignment for a consumer group
+type ConsumerGroupMemberAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..a8dcaef
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,34 @@
+package sarama
+
+//ConsumerMetadataRequest is used for consumer group metadata requests
+type ConsumerMetadataRequest struct {
+	ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+	tmp := new(FindCoordinatorRequest)
+	tmp.CoordinatorKey = r.ConsumerGroup
+	tmp.CoordinatorType = CoordinatorGroup
+	return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorRequest)
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+	r.ConsumerGroup = tmp.CoordinatorKey
+	return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..f39a871
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+//ConsumerMetadataResponse holds the response for a consumer group metadata request
+type ConsumerMetadataResponse struct {
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorResponse)
+
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
+		return nil
+	}
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+	}
+
+	tmp := &FindCoordinatorResponse{
+		Version:     0,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 0000000..1f14443
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,69 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+)
+
+type crcPolynomial int8
+
+const (
+	crcIEEE crcPolynomial = iota
+	crcCastagnoli
+)
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+	startOffset int
+	polynomial  crcPolynomial
+}
+
+func (c *crc32Field) saveOffset(in int) {
+	c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+	return 4
+}
+
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+	return &crc32Field{polynomial: polynomial}
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+	return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+	crc, err := c.crc(curOffset, buf)
+	if err != nil {
+		return err
+	}
+
+	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+	if crc != expected {
+		return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
+	}
+
+	return nil
+}
+
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+	var tab *crc32.Table
+	switch c.polynomial {
+	case crcIEEE:
+		tab = crc32.IEEETable
+	case crcCastagnoli:
+		tab = castagnoliTable
+	default:
+		return 0, PacketDecodingError{"invalid CRC type"}
+	}
+	return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
+}
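+
+// Lifecycle sketch (hypothetical buffer; in practice the packet
+// encoders/decoders drive these calls): saveOffset records where the 4-byte
+// CRC field starts, and run/check then checksum everything after it.
+//
+//	f := newCRC32Field(crcCastagnoli)
+//	f.saveOffset(0)          // CRC occupies buf[0:4]
+//	_ = f.run(len(buf), buf) // writes checksum of buf[4:] into buf[0:4]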
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
new file mode 100644
index 0000000..af321e9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_request.go
@@ -0,0 +1,121 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+	TopicPartitions map[string]*TopicPartition
+	Timeout         time.Duration
+	ValidateOnly    bool
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partition := range c.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	pe.putBool(c.ValidateOnly)
+
+	return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	c.TopicPartitions = make(map[string]*TopicPartition, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitions[topic] = new(TopicPartition)
+		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if c.ValidateOnly, err = pd.getBool(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+	return 37
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type TopicPartition struct {
+	Count      int32
+	Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+	pe.putInt32(t.Count)
+
+	if len(t.Assignment) == 0 {
+		pe.putInt32(-1)
+		return nil
+	}
+
+	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+		return err
+	}
+
+	for _, assign := range t.Assignment {
+		if err := pe.putInt32Array(assign); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+	if t.Count, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	n, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if n <= 0 {
+		return nil
+	}
+	t.Assignment = make([][]int32, n)
+
+	for i := 0; i < int(n); i++ {
+		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
new file mode 100644
index 0000000..abd621c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_partitions_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsResponse struct {
+	ThrottleTime         time.Duration
+	TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+		return err
+	}
+
+	for topic, partitionError := range c.TopicPartitionErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partitionError.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+	return 37
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+	return V1_0_0_0
+}
+
+type TopicPartitionError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+	pe.putInt16(int16(t.Err))
+
+	if err := pe.putNullableString(t.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kerr)
+
+	if t.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
new file mode 100644
index 0000000..709c0a4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_request.go
@@ -0,0 +1,174 @@
+package sarama
+
+import (
+	"time"
+)
+
+type CreateTopicsRequest struct {
+	Version int16
+
+	TopicDetails map[string]*TopicDetail
+	Timeout      time.Duration
+	ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+		return err
+	}
+	for topic, detail := range c.TopicDetails {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := detail.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	if c.Version >= 1 {
+		pe.putBool(c.ValidateOnly)
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicDetails = make(map[string]*TopicDetail, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicDetails[topic] = new(TopicDetail)
+		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if version >= 1 {
+		c.ValidateOnly, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		c.Version = version
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+	return 19
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 2:
+		return V1_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
+
+type TopicDetail struct {
+	NumPartitions     int32
+	ReplicationFactor int16
+	ReplicaAssignment map[int32][]int32
+	ConfigEntries     map[string]*string
+}
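+
+// An illustrative request construction (topic name and values are examples
+// only, not defaults):
+//
+//	detail := &TopicDetail{NumPartitions: 3, ReplicationFactor: 2}
+//	req := &CreateTopicsRequest{
+//		Version:      1,
+//		Timeout:      30 * time.Second,
+//		TopicDetails: map[string]*TopicDetail{"example-topic": detail},
+//	}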
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+	pe.putInt32(t.NumPartitions)
+	pe.putInt16(t.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+		return err
+	}
+	for partition, assignment := range t.ReplicaAssignment {
+		pe.putInt32(partition)
+		if err := pe.putInt32Array(assignment); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range t.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+	if t.NumPartitions, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ReplicaAssignment = make(map[int32][]int32, n)
+		for i := 0; i < n; i++ {
+			replica, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+				return err
+			}
+		}
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
new file mode 100644
index 0000000..66207e0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/create_topics_response.go
@@ -0,0 +1,112 @@
+package sarama
+
+import "time"
+
+type CreateTopicsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	TopicErrors  map[string]*TopicError
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+	if c.Version >= 2 {
+		pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
+	}
+
+	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+		return err
+	}
+	for topic, topicError := range c.TopicErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := topicError.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+
+	if version >= 2 {
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicErrors = make(map[string]*TopicError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicErrors[topic] = new(TopicError)
+		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+	return 19
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 2:
+		return V1_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
+
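+// TopicError is the per-topic result of a CreateTopicsRequest: the broker
+// error code plus, from v1 onwards, an optional human-readable message.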
+type TopicError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+	pe.putInt16(int16(t.Err))
+
+	if version >= 1 {
+		if err := pe.putNullableString(t.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+	kErr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kErr)
+
+	if version >= 1 {
+		if t.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
new file mode 100644
index 0000000..eaccbfc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/decompress.go
@@ -0,0 +1,63 @@
+package sarama
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"sync"
+
+	"github.com/eapache/go-xerial-snappy"
+	"github.com/pierrec/lz4"
+)
+
+var (
+	lz4ReaderPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewReader(nil)
+		},
+	}
+
+	gzipReaderPool sync.Pool
+)
+
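+// decompress inflates data according to the given codec. The gzip and lz4
+// readers are recycled through sync.Pools so a reader is not re-allocated for
+// every message batch; snappy and zstd are decoded directly via their helpers.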
+func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var (
+			err        error
+			reader     *gzip.Reader
+			readerIntf = gzipReaderPool.Get()
+		)
+		if readerIntf != nil {
+			reader = readerIntf.(*gzip.Reader)
+		} else {
+			reader, err = gzip.NewReader(bytes.NewReader(data))
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		defer gzipReaderPool.Put(reader)
+
+		if err := reader.Reset(bytes.NewReader(data)); err != nil {
+			return nil, err
+		}
+
+		return ioutil.ReadAll(reader)
+	case CompressionSnappy:
+		return snappy.Decode(data)
+	case CompressionLZ4:
+		reader := lz4ReaderPool.Get().(*lz4.Reader)
+		defer lz4ReaderPool.Put(reader)
+
+		reader.Reset(bytes.NewReader(data))
+		return ioutil.ReadAll(reader)
+	case CompressionZSTD:
+		return zstdDecompress(nil, data)
+	default:
+		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
new file mode 100644
index 0000000..305a324
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+	return 42
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+	return V1_1_0_0
+}
+
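+// AddGroup appends a consumer group ID to the list of groups to delete.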
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
new file mode 100644
index 0000000..c067ebb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_groups_response.go
@@ -0,0 +1,70 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteGroupsResponse struct {
+	ThrottleTime    time.Duration
+	GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+		return err
+	}
+	for groupID, errorCode := range r.GroupErrorCodes {
+		if err := pe.putString(groupID); err != nil {
+			return err
+		}
+		pe.putInt16(int16(errorCode))
+	}
+
+	return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupErrorCodes = make(map[string]KError, n)
+	for i := 0; i < n; i++ {
+		groupID, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		errorCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+
+		r.GroupErrorCodes[groupID] = KError(errorCode)
+	}
+
+	return nil
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+	return 42
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+	return V1_1_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
new file mode 100644
index 0000000..93efafd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_records_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) offset(int64)
+
+type DeleteRecordsRequest struct {
+	Topics  map[string]*DeleteRecordsRequestTopic
+	Timeout time.Duration
+}
+
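+// encode writes the topics in sorted key order so that the wire output is
+// deterministic even though Topics is a Go map.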
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsRequestTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+	return 21
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type DeleteRecordsRequestTopic struct {
+	PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.PartitionOffsets))
+	for partition := range t.PartitionOffsets {
+		keys = append(keys, partition)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		pe.putInt64(t.PartitionOffsets[partition])
+	}
+	return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.PartitionOffsets = make(map[int32]int64, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			offset, err := pd.getInt64()
+			if err != nil {
+				return err
+			}
+			t.PartitionOffsets[partition] = offset
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
new file mode 100644
index 0000000..733a58b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_records_response.go
@@ -0,0 +1,158 @@
+package sarama
+
+import (
+	"sort"
+	"time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
+	d.Version = version
+
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponseTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+	return 21
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type DeleteRecordsResponseTopic struct {
+	Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.Partitions))
+	for partition := range t.Partitions {
+		keys = append(keys, partition)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		if err := t.Partitions[partition].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponsePartition)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			t.Partitions[partition] = details
+		}
+	}
+
+	return nil
+}
+
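+// DeleteRecordsResponsePartition carries the per-partition outcome: the new
+// low watermark after deletion, together with any broker error.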
+type DeleteRecordsResponsePartition struct {
+	LowWatermark int64
+	Err          KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt64(t.LowWatermark)
+	pe.putInt16(int16(t.Err))
+	return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+	lowWatermark, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	t.LowWatermark = lowWatermark
+
+	kErr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	t.Err = KError(kErr)
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
new file mode 100644
index 0000000..911f67d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_request.go
@@ -0,0 +1,48 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+	Version int16
+	Topics  []string
+	Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(d.Topics); err != nil {
+		return err
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	if d.Topics, err = pd.getStringArray(); err != nil {
+		return err
+	}
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+	d.Version = version
+	return nil
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+	return 20
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
new file mode 100644
index 0000000..3422546
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/delete_topics_response.go
@@ -0,0 +1,78 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+	if d.Version >= 1 {
+		pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
+	}
+
+	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+		return err
+	}
+	for topic, errorCode := range d.TopicErrorCodes {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		pe.putInt16(int16(errorCode))
+	}
+
+	return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = version
+
+	if version >= 1 {
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.TopicErrorCodes = make(map[string]KError, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		errorCode, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+
+		d.TopicErrorCodes[topic] = KError(errorCode)
+	}
+
+	return nil
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+	return 20
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_10_1_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
new file mode 100644
index 0000000..416a4fe
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_request.go
@@ -0,0 +1,112 @@
+package sarama
+
+type DescribeConfigsRequest struct {
+	Version         int16
+	Resources       []*ConfigResource
+	IncludeSynonyms bool
+}
+
+type ConfigResource struct {
+	Type        ConfigResourceType
+	Name        string
+	ConfigNames []string
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		pe.putInt8(int8(c.Type))
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+
+		if len(c.ConfigNames) == 0 {
+			pe.putInt32(-1)
+			continue
+		}
+		if err := pe.putStringArray(c.ConfigNames); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putBool(r.IncludeSynonyms)
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ConfigResource, n)
+
+	for i := 0; i < n; i++ {
+		r.Resources[i] = &ConfigResource{}
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Type = ConfigResourceType(t)
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Name = name
+
+		confLength, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if confLength == -1 {
+			continue
+		}
+
+		cfnames := make([]string, confLength)
+		for j := 0; j < confLength; j++ {
+			s, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			cfnames[j] = s
+		}
+		r.Resources[i].ConfigNames = cfnames
+	}
+	r.Version = version
+	if r.Version >= 1 {
+		b, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeSynonyms = b
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+	return 32
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	case 2:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
new file mode 100644
index 0000000..63fb6ea
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_configs_response.go
@@ -0,0 +1,320 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type ConfigSource int8
+
+func (s ConfigSource) String() string {
+	switch s {
+	case SourceUnknown:
+		return "Unknown"
+	case SourceTopic:
+		return "Topic"
+	case SourceDynamicBroker:
+		return "DynamicBroker"
+	case SourceDynamicDefaultBroker:
+		return "DynamicDefaultBroker"
+	case SourceStaticBroker:
+		return "StaticBroker"
+	case SourceDefault:
+		return "Default"
+	}
+	return fmt.Sprintf("Source Invalid: %d", int(s))
+}
+
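+// Known configuration sources, matching the values the broker reports for
+// each config entry (see the KIP-226 link below for the dynamic broker
+// configuration sources).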
+const (
+	SourceUnknown              ConfigSource = 0
+	SourceTopic                ConfigSource = 1
+	SourceDynamicBroker        ConfigSource = 2
+	SourceDynamicDefaultBroker ConfigSource = 3
+	SourceStaticBroker         ConfigSource = 4
+	SourceDefault              ConfigSource = 5
+)
+
+type DescribeConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*ResourceResponse
+}
+
+type ResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+	Configs   []*ConfigEntry
+}
+
+type ConfigEntry struct {
+	Name      string
+	Value     string
+	ReadOnly  bool
+	Default   bool
+	Source    ConfigSource
+	Sensitive bool
+	Synonyms  []*ConfigSynonym
+}
+
+type ConfigSynonym struct {
+	ConfigName  string
+	ConfigValue string
+	Source      ConfigSource
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	if err = pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		if err = c.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ResourceResponse, n)
+	for i := 0; i < n; i++ {
+		rr := &ResourceResponse{}
+		if err := rr.decode(pd, version); err != nil {
+			return err
+		}
+		r.Resources[i] = rr
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+	return 32
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	case 2:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err = pe.putString(r.ErrorMsg); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(r.Type))
+
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Configs {
+		if err = c.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+	ec, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = ec
+
+	em, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.ErrorMsg = em
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Configs = make([]*ConfigEntry, n)
+	for i := 0; i < n; i++ {
+		c := &ConfigEntry{}
+		if err := c.decode(pd, version); err != nil {
+			return err
+		}
+		r.Configs[i] = c
+	}
+	return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putString(r.Value); err != nil {
+		return err
+	}
+
+	pe.putBool(r.ReadOnly)
+
+	if version <= 0 {
+		pe.putBool(r.Default)
+		pe.putBool(r.Sensitive)
+	} else {
+		pe.putInt8(int8(r.Source))
+		pe.putBool(r.Sensitive)
+
+		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+			return err
+		}
+		for _, c := range r.Synonyms {
+			if err = c.encode(pe, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	if version == 0 {
+		r.Source = SourceUnknown
+	}
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	if version == 0 {
+		defaultB, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.Default = defaultB
+	} else {
+		source, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Source = ConfigSource(source)
+	}
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+
+	if version > 0 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Synonyms = make([]*ConfigSynonym, n)
+
+		for i := 0; i < n; i++ {
+			s := &ConfigSynonym{}
+			if err := s.decode(pd, version); err != nil {
+				return err
+			}
+			r.Synonyms[i] = s
+		}
+	}
+	return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+	err = pe.putString(c.ConfigName)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putString(c.ConfigValue)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(c.Source))
+
+	return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigName = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigValue = value
+
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.Source = ConfigSource(source)
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 0000000..1fb3567
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+	Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+	return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 0000000..542b3a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+	Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+
+	for _, groupDescription := range r.Groups {
+		if err := groupDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Groups = make([]*GroupDescription, n)
+	for i := 0; i < n; i++ {
+		r.Groups[i] = new(GroupDescription)
+		if err := r.Groups[i].decode(pd); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+	return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+type GroupDescription struct {
+	Err          KError
+	GroupId      string
+	State        string
+	ProtocolType string
+	Protocol     string
+	Members      map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+	pe.putInt16(int16(gd.Err))
+
+	if err := pe.putString(gd.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.State); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.ProtocolType); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.Protocol); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(gd.Members)); err != nil {
+		return err
+	}
+
+	for memberId, groupMemberDescription := range gd.Members {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+		if err := groupMemberDescription.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	gd.Err = KError(kerr)
+
+	if gd.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.State, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+	if gd.Protocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	gd.Members = make(map[string]*GroupMemberDescription)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		gd.Members[memberId] = new(GroupMemberDescription)
+		if err := gd.Members[memberId].decode(pd); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type GroupMemberDescription struct {
+	ClientId         string
+	ClientHost       string
+	MemberMetadata   []byte
+	MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+	if err := pe.putString(gmd.ClientId); err != nil {
+		return err
+	}
+	if err := pe.putString(gmd.ClientHost); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+	if gmd.ClientId, err = pd.getString(); err != nil {
+		return
+	}
+	if gmd.ClientHost, err = pd.getString(); err != nil {
+		return
+	}
+	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+		return
+	}
+	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
+
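+// GetMemberAssignment decodes the member's raw assignment bytes into a
+// ConsumerGroupMemberAssignment.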
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(gmd.MemberAssignment, assignment)
+	return assignment, err
+}
+
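+// GetMemberMetadata decodes the member's raw metadata bytes into a
+// ConsumerGroupMemberMetadata.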
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+	metadata := new(ConsumerGroupMemberMetadata)
+	err := decode(gmd.MemberMetadata, metadata)
+	return metadata, err
+}
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 0000000..97eed3a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+  - go:
+      version: '1.11'
+
+commands:
+  test:
+    run: make test
+    desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 0000000..7ce3bc0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+	"fmt"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// encoder is the interface that wraps the basic encode method.
+// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+	encode(pe packetEncoder) error
+}
+
+// encode takes an encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+	if e == nil {
+		return nil, nil
+	}
+
+	var prepEnc prepEncoder
+	var realEnc realEncoder
+
+	err := e.encode(&prepEnc)
+	if err != nil {
+		return nil, err
+	}
+
+	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+	}
+
+	realEnc.raw = make([]byte, prepEnc.length)
+	realEnc.registry = metricRegistry
+	err = e.encode(&realEnc)
+	if err != nil {
+		return nil, err
+	}
+
+	return realEnc.raw, nil
+}
+
+// decoder is the interface that wraps the basic decode method.
+// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+	decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+	decode(pd packetDecoder, version int16) error
+}
+
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := realDecoder{raw: buf}
+	err := in.decode(&helper)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{"invalid length"}
+	}
+
+	return nil
+}
+
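+// versionedDecode is like decode, but for messages whose wire format depends
+// on the API version they were sent with.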
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := realDecoder{raw: buf}
+	err := in.decode(&helper, version)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{"invalid length"}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
new file mode 100644
index 0000000..2cd9b50
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_request.go
@@ -0,0 +1,50 @@
+package sarama
+
+type EndTxnRequest struct {
+	TransactionalID   string
+	ProducerID        int64
+	ProducerEpoch     int16
+	TransactionResult bool
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	pe.putBool(a.TransactionResult)
+
+	return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.TransactionResult, err = pd.getBool(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+	return 26
+}
+
+func (a *EndTxnRequest) version() int16 {
+	return 0
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
new file mode 100644
index 0000000..33b27e3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/end_txn_response.go
@@ -0,0 +1,44 @@
+package sarama
+
+import (
+	"time"
+)
+
+type EndTxnResponse struct {
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(e.Err))
+	return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	e.Err = KError(kerr)
+
+	return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+	return 26
+}
+
+func (e *EndTxnResponse) version() int16 {
+	return 0
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 0000000..c11421d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,317 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. This may happen if the
+// Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+	ErrNoError                            KError = 0
+	ErrUnknown                            KError = -1
+	ErrOffsetOutOfRange                   KError = 1
+	ErrInvalidMessage                     KError = 2
+	ErrUnknownTopicOrPartition            KError = 3
+	ErrInvalidMessageSize                 KError = 4
+	ErrLeaderNotAvailable                 KError = 5
+	ErrNotLeaderForPartition              KError = 6
+	ErrRequestTimedOut                    KError = 7
+	ErrBrokerNotAvailable                 KError = 8
+	ErrReplicaNotAvailable                KError = 9
+	ErrMessageSizeTooLarge                KError = 10
+	ErrStaleControllerEpochCode           KError = 11
+	ErrOffsetMetadataTooLarge             KError = 12
+	ErrNetworkException                   KError = 13
+	ErrOffsetsLoadInProgress              KError = 14
+	ErrConsumerCoordinatorNotAvailable    KError = 15
+	ErrNotCoordinatorForConsumer          KError = 16
+	ErrInvalidTopic                       KError = 17
+	ErrMessageSetSizeTooLarge             KError = 18
+	ErrNotEnoughReplicas                  KError = 19
+	ErrNotEnoughReplicasAfterAppend       KError = 20
+	ErrInvalidRequiredAcks                KError = 21
+	ErrIllegalGeneration                  KError = 22
+	ErrInconsistentGroupProtocol          KError = 23
+	ErrInvalidGroupId                     KError = 24
+	ErrUnknownMemberId                    KError = 25
+	ErrInvalidSessionTimeout              KError = 26
+	ErrRebalanceInProgress                KError = 27
+	ErrInvalidCommitOffsetSize            KError = 28
+	ErrTopicAuthorizationFailed           KError = 29
+	ErrGroupAuthorizationFailed           KError = 30
+	ErrClusterAuthorizationFailed         KError = 31
+	ErrInvalidTimestamp                   KError = 32
+	ErrUnsupportedSASLMechanism           KError = 33
+	ErrIllegalSASLState                   KError = 34
+	ErrUnsupportedVersion                 KError = 35
+	ErrTopicAlreadyExists                 KError = 36
+	ErrInvalidPartitions                  KError = 37
+	ErrInvalidReplicationFactor           KError = 38
+	ErrInvalidReplicaAssignment           KError = 39
+	ErrInvalidConfig                      KError = 40
+	ErrNotController                      KError = 41
+	ErrInvalidRequest                     KError = 42
+	ErrUnsupportedForMessageFormat        KError = 43
+	ErrPolicyViolation                    KError = 44
+	ErrOutOfOrderSequenceNumber           KError = 45
+	ErrDuplicateSequenceNumber            KError = 46
+	ErrInvalidProducerEpoch               KError = 47
+	ErrInvalidTxnState                    KError = 48
+	ErrInvalidProducerIDMapping           KError = 49
+	ErrInvalidTransactionTimeout          KError = 50
+	ErrConcurrentTransactions             KError = 51
+	ErrTransactionCoordinatorFenced       KError = 52
+	ErrTransactionalIDAuthorizationFailed KError = 53
+	ErrSecurityDisabled                   KError = 54
+	ErrOperationNotAttempted              KError = 55
+	ErrKafkaStorageError                  KError = 56
+	ErrLogDirNotFound                     KError = 57
+	ErrSASLAuthenticationFailed           KError = 58
+	ErrUnknownProducerID                  KError = 59
+	ErrReassignmentInProgress             KError = 60
+	ErrDelegationTokenAuthDisabled        KError = 61
+	ErrDelegationTokenNotFound            KError = 62
+	ErrDelegationTokenOwnerMismatch       KError = 63
+	ErrDelegationTokenRequestNotAllowed   KError = 64
+	ErrDelegationTokenAuthorizationFailed KError = 65
+	ErrDelegationTokenExpired             KError = 66
+	ErrInvalidPrincipalType               KError = 67
+	ErrNonEmptyGroup                      KError = 68
+	ErrGroupIDNotFound                    KError = 69
+	ErrFetchSessionIDNotFound             KError = 70
+	ErrInvalidFetchSessionEpoch           KError = 71
+	ErrListenerNotFound                   KError = 72
+)
+
+func (err KError) Error() string {
+	// Error messages stolen/adapted from
+	// https://kafka.apache.org/protocol#protocol_error_codes
+	switch err {
+	case ErrNoError:
+		return "kafka server: Not an error, why are you printing me?"
+	case ErrUnknown:
+		return "kafka server: Unexpected (unknown?) server error."
+	case ErrOffsetOutOfRange:
+		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+	case ErrInvalidMessage:
+		return "kafka server: Message contents does not match its CRC."
+	case ErrUnknownTopicOrPartition:
+		return "kafka server: Request was for a topic or partition that does not exist on this broker."
+	case ErrInvalidMessageSize:
+		return "kafka server: The message has a negative size."
+	case ErrLeaderNotAvailable:
+		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+	case ErrNotLeaderForPartition:
+		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request."
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down."
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid allocation error."
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received."
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: Offset's topic has not yet been created."
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic."
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included message batch larger than the configured segment size on the server."
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation."
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provider group protocol type is incompatible with the other members."
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty."
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation."
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range."
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large."
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic."
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group."
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type."
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range."
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism."
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state."
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of API is not supported."
+	case ErrTopicAlreadyExists:
+		return "kafka server: Topic with this name already exists."
+	case ErrInvalidPartitions:
+		return "kafka server: Number of partitions is invalid."
+	case ErrInvalidReplicationFactor:
+		return "kafka server: Replication-factor is invalid."
+	case ErrInvalidReplicaAssignment:
+		return "kafka server: Replica assignment is invalid."
+	case ErrInvalidConfig:
+		return "kafka server: Configuration is invalid."
+	case ErrNotController:
+		return "kafka server: This is not the correct controller for this cluster."
+	case ErrInvalidRequest:
+		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+	case ErrUnsupportedForMessageFormat:
+		return "kafka server: The requested operation is not supported by the message format version."
+	case ErrPolicyViolation:
+		return "kafka server: Request parameters do not satisfy the configured policy."
+	case ErrOutOfOrderSequenceNumber:
+		return "kafka server: The broker received an out of order sequence number."
+	case ErrDuplicateSequenceNumber:
+		return "kafka server: The broker received a duplicate sequence number."
+	case ErrInvalidProducerEpoch:
+		return "kafka server: Producer attempted an operation with an old epoch."
+	case ErrInvalidTxnState:
+		return "kafka server: The producer attempted a transactional operation in an invalid state."
+	case ErrInvalidProducerIDMapping:
+		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
+	case ErrInvalidTransactionTimeout:
+		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
+	case ErrConcurrentTransactions:
+		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
+	case ErrTransactionCoordinatorFenced:
+		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
+	case ErrTransactionalIDAuthorizationFailed:
+		return "kafka server: Transactional ID authorization failed."
+	case ErrSecurityDisabled:
+		return "kafka server: Security features are disabled."
+	case ErrOperationNotAttempted:
+		return "kafka server: The broker did not attempt to execute this operation."
+	case ErrKafkaStorageError:
+		return "kafka server: Disk error when trying to access log file on the disk."
+	case ErrLogDirNotFound:
+		return "kafka server: The specified log directory is not found in the broker config."
+	case ErrSASLAuthenticationFailed:
+		return "kafka server: SASL Authentication failed."
+	case ErrUnknownProducerID:
+		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
+	case ErrReassignmentInProgress:
+		return "kafka server: A partition reassignment is in progress."
+	case ErrDelegationTokenAuthDisabled:
+		return "kafka server: Delegation Token feature is not enabled."
+	case ErrDelegationTokenNotFound:
+		return "kafka server: Delegation Token is not found on server."
+	case ErrDelegationTokenOwnerMismatch:
+		return "kafka server: Specified Principal is not valid Owner/Renewer."
+	case ErrDelegationTokenRequestNotAllowed:
+		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
+	case ErrDelegationTokenAuthorizationFailed:
+		return "kafka server: Delegation Token authorization failed."
+	case ErrDelegationTokenExpired:
+		return "kafka server: Delegation Token is expired."
+	case ErrInvalidPrincipalType:
+		return "kafka server: Supplied principalType is not supported."
+	case ErrNonEmptyGroup:
+		return "kafka server: The group is not empty."
+	case ErrGroupIDNotFound:
+		return "kafka server: The group id does not exist."
+	case ErrFetchSessionIDNotFound:
+		return "kafka server: The fetch session ID was not found."
+	case ErrInvalidFetchSessionEpoch:
+		return "kafka server: The fetch session epoch is invalid."
+	case ErrListenerNotFound:
+		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
+	}
+
+	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 0000000..462ab8a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,170 @@
+package sarama
+
+type fetchRequestBlock struct {
+	fetchOffset int64
+	maxBytes    int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+	pe.putInt64(b.fetchOffset)
+	pe.putInt32(b.maxBytes)
+	return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+	if b.fetchOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if b.maxBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+	MaxWaitTime int32
+	MinBytes    int32
+	MaxBytes    int32
+	Version     int16
+	Isolation   IsolationLevel
+	blocks      map[string]map[int32]*fetchRequestBlock
+}
+
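+// IsolationLevel selects which transactional records a fetch may return:
+// ReadUncommitted returns everything, ReadCommitted only records from
+// committed transactions. It is only encoded for FetchRequest v4 and above.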
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = 0
+	ReadCommitted   IsolationLevel = 1
+)
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+	pe.putInt32(-1) // replica ID is always -1 for clients
+	pe.putInt32(r.MaxWaitTime)
+	pe.putInt32(r.MinBytes)
+	if r.Version >= 3 {
+		pe.putInt32(r.MaxBytes)
+	}
+	if r.Version >= 4 {
+		pe.putInt8(int8(r.Isolation))
+	}
+	err = pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, blocks := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(blocks))
+		if err != nil {
+			return err
+		}
+		for partition, block := range blocks {
+			pe.putInt32(partition)
+			err = block.encode(pe)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if _, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MinBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.MaxBytes, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 4 {
+		isolation, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Isolation = IsolationLevel(isolation)
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			fetchBlock := &fetchRequestBlock{}
+			if err = fetchBlock.decode(pd); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = fetchBlock
+		}
+	}
+	return nil
+}
+
+func (r *FetchRequest) key() int16 {
+	return 1
+}
+
+func (r *FetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_10_1_0
+	case 4:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
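+// AddBlock registers a (topic, partition) pair to fetch, starting at
+// fetchOffset and returning at most maxBytes for that partition. A minimal
+// sketch, with hypothetical topic and offsets:
+//
+//	req := &FetchRequest{MinBytes: 1, MaxWaitTime: 250}
+//	req.AddBlock("my-topic", 0, 1234, 32768)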
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+	}
+
+	tmp := new(fetchRequestBlock)
+	tmp.maxBytes = maxBytes
+	tmp.fetchOffset = fetchOffset
+
+	r.blocks[topic][partitionID] = tmp
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 0000000..90acfc2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,396 @@
+package sarama
+
+import (
+	"time"
+)
+
+type AbortedTransaction struct {
+	ProducerID  int64
+	FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if t.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+	pe.putInt64(t.ProducerID)
+	pe.putInt64(t.FirstOffset)
+
+	return nil
+}
+
+type FetchResponseBlock struct {
+	Err                 KError
+	HighWaterMarkOffset int64
+	LastStableOffset    int64
+	AbortedTransactions []*AbortedTransaction
+	Records             *Records // deprecated: use FetchResponseBlock.RecordsSet
+	RecordsSet          []*Records
+	Partial             bool
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	b.HighWaterMarkOffset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 4 {
+		b.LastStableOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		numTransact, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if numTransact >= 0 {
+			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+		}
+
+		for i := 0; i < numTransact; i++ {
+			transact := new(AbortedTransaction)
+			if err = transact.decode(pd); err != nil {
+				return err
+			}
+			b.AbortedTransactions[i] = transact
+		}
+	}
+
+	recordsSize, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	recordsDecoder, err := pd.getSubset(int(recordsSize))
+	if err != nil {
+		return err
+	}
+
+	b.RecordsSet = []*Records{}
+
+	for recordsDecoder.remaining() > 0 {
+		records := &Records{}
+		if err := records.decode(recordsDecoder); err != nil {
+			// If we have already decoded at least one record set, running out of data is not an error
+			if err == ErrInsufficientData {
+				if len(b.RecordsSet) == 0 {
+					b.Partial = true
+				}
+				break
+			}
+			return err
+		}
+
+		partial, err := records.isPartial()
+		if err != nil {
+			return err
+		}
+
+		n, err := records.numRecords()
+		if err != nil {
+			return err
+		}
+
+		if n > 0 || (partial && len(b.RecordsSet) == 0) {
+			b.RecordsSet = append(b.RecordsSet, records)
+
+			if b.Records == nil {
+				b.Records = records
+			}
+		}
+
+		overflow, err := records.isOverflow()
+		if err != nil {
+			return err
+		}
+
+		if partial || overflow {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+	sum := 0
+
+	for _, records := range b.RecordsSet {
+		count, err := records.numRecords()
+		if err != nil {
+			return 0, err
+		}
+
+		sum += count
+	}
+
+	return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+	if b.Partial {
+		return true, nil
+	}
+
+	if len(b.RecordsSet) == 1 {
+		return b.RecordsSet[0].isPartial()
+	}
+
+	return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+
+	pe.putInt64(b.HighWaterMarkOffset)
+
+	if version >= 4 {
+		pe.putInt64(b.LastStableOffset)
+
+		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+			return err
+		}
+		for _, transact := range b.AbortedTransactions {
+			if err = transact.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.push(&lengthField{})
+	for _, records := range b.RecordsSet {
+		err = records.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return pe.pop()
+}
+
+type FetchResponse struct {
+	Blocks       map[string]map[int32]*FetchResponseBlock
+	ThrottleTime time.Duration
+	Version      int16 // v1 requires 0.9+, v2 requires 0.10+
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.Version >= 1 {
+		throttle, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(FetchResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	}
+
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+
+		for id, block := range partitions {
+			pe.putInt32(id)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+
+	}
+	return nil
+}
+
+func (r *FetchResponse) key() int16 {
+	return 1
+}
+
+func (r *FetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_10_1_0
+	case 4:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+	frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+
+	return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+	var kb []byte
+	var vb []byte
+	if key != nil {
+		kb, _ = key.Encode()
+	}
+	if value != nil {
+		vb, _ = value.Encode()
+	}
+
+	return kb, vb
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	msg := &Message{Key: kb, Value: vb}
+	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newLegacyRecords(&MessageSet{})
+		frb.RecordsSet = []*Records{&records}
+	}
+	set := frb.RecordsSet[0].MsgSet
+	set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	batch.addRecord(rec)
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+	frb := r.getOrCreateBlock(topic, partition)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	frb.LastStableOffset = offset
+}
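+
+// Example (illustrative sketch, not part of the upstream sarama sources): the
+// Add*/Set* helpers above let tests fabricate broker responses. StringEncoder
+// and ErrNotLeaderForPartition are assumed to come from elsewhere in this
+// package; topic and partition values are placeholders:
+//
+//	res := &FetchResponse{Version: 4}
+//	res.AddMessage("my-topic", 0, nil, StringEncoder("hello"), 0)
+//	res.AddError("other-topic", 1, ErrNotLeaderForPartition)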
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
new file mode 100644
index 0000000..0ab5cb5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
@@ -0,0 +1,61 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+	CoordinatorGroup       CoordinatorType = 0
+	CoordinatorTransaction CoordinatorType = 1
+)
+
+type FindCoordinatorRequest struct {
+	Version         int16
+	CoordinatorKey  string
+	CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(f.CoordinatorKey); err != nil {
+		return err
+	}
+
+	if f.Version >= 1 {
+		pe.putInt8(int8(f.CoordinatorType))
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+	if f.CoordinatorKey, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		f.Version = version
+		coordinatorType, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		f.CoordinatorType = CoordinatorType(coordinatorType)
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+	return 10
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+	return f.Version
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
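+
+// Example (illustrative sketch, not part of the upstream sarama sources):
+// locating the coordinator for a consumer group; from Version 1 on, the
+// CoordinatorType field distinguishes group from transaction coordinators:
+//
+//	req := &FindCoordinatorRequest{
+//		Version:         1,
+//		CoordinatorKey:  "my-consumer-group",
+//		CoordinatorType: CoordinatorGroup,
+//	}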
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
new file mode 100644
index 0000000..9c900e8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
@@ -0,0 +1,92 @@
+package sarama
+
+import (
+	"time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	Coordinator  *Broker
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		f.Version = version
+
+		throttleTime, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+		f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+	}
+
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	f.Err = KError(tmp)
+
+	if version >= 1 {
+		if f.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	coordinator := new(Broker)
+	// The version is hardcoded to 0, as version 1 of Broker.decode
+	// adds the rack field, which is not present in the FindCoordinatorResponse.
+	if err := coordinator.decode(pd, 0); err != nil {
+		return err
+	}
+	if coordinator.addr == ":0" {
+		return nil
+	}
+	f.Coordinator = coordinator
+
+	return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+	if f.Version >= 1 {
+		pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
+	}
+
+	pe.putInt16(int16(f.Err))
+
+	if f.Version >= 1 {
+		if err := pe.putNullableString(f.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	coordinator := f.Coordinator
+	if coordinator == nil {
+		coordinator = NoNode
+	}
+	if err := coordinator.encode(pe, 0); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+	return 10
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+	return f.Version
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 0000000..ce49c47
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+	GroupId      string
+	GenerationId int32
+	MemberId     string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+	return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+	return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
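+
+// Example (illustrative sketch, not part of the upstream sarama sources): a
+// heartbeat echoes the identity obtained from an earlier JoinGroup exchange;
+// all values below are placeholders:
+//
+//	hb := &HeartbeatRequest{GroupId: "my-group", GenerationId: 3, MemberId: "member-1"}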
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 0000000..766f5fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+	Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(kerr)
+
+	return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+	return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+	return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
new file mode 100644
index 0000000..8ceb6c2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
@@ -0,0 +1,43 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+	TransactionalID    *string
+	TransactionTimeout time.Duration
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+	if err := pe.putNullableString(i.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+
+	return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+	if i.TransactionalID, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+	return 22
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+	return 0
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
new file mode 100644
index 0000000..1b32eb0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
@@ -0,0 +1,55 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+	ThrottleTime  time.Duration
+	Err           KError
+	ProducerID    int64
+	ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
+	pe.putInt16(int16(i.Err))
+	pe.putInt64(i.ProducerID)
+	pe.putInt16(i.ProducerEpoch)
+
+	return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	i.Err = KError(kerr)
+
+	if i.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+	return 22
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+	return 0
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 0000000..97e9299
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,163 @@
+package sarama
+
+type GroupProtocol struct {
+	Name     string
+	Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+	p.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	p.Metadata, err = pd.getBytes()
+	return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(p.Name); err != nil {
+		return err
+	}
+	if err := pe.putBytes(p.Metadata); err != nil {
+		return err
+	}
+	return nil
+}
+
+type JoinGroupRequest struct {
+	Version               int16
+	GroupId               string
+	SessionTimeout        int32
+	RebalanceTimeout      int32
+	MemberId              string
+	ProtocolType          string
+	GroupProtocols        map[string][]byte // deprecated; use OrderedGroupProtocols
+	OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	pe.putInt32(r.SessionTimeout)
+	if r.Version >= 1 {
+		pe.putInt32(r.RebalanceTimeout)
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.ProtocolType); err != nil {
+		return err
+	}
+
+	if len(r.GroupProtocols) > 0 {
+		if len(r.OrderedGroupProtocols) > 0 {
+			return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+		}
+
+		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+			return err
+		}
+		for name, metadata := range r.GroupProtocols {
+			if err := pe.putString(name); err != nil {
+				return err
+			}
+			if err := pe.putBytes(metadata); err != nil {
+				return err
+			}
+		}
+	} else {
+		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+			return err
+		}
+		for _, protocol := range r.OrderedGroupProtocols {
+			if err := protocol.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.SessionTimeout, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if version >= 1 {
+		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupProtocols = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		protocol := &GroupProtocol{}
+		if err := protocol.decode(pd); err != nil {
+			return err
+		}
+		r.GroupProtocols[protocol.Name] = protocol.Metadata
+		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+	}
+
+	return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+	return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	default:
+		return V0_9_0_0
+	}
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+		Name:     name,
+		Metadata: metadata,
+	})
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+	bin, err := encode(metadata, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupProtocol(name, bin)
+	return nil
+}
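+
+// Example (illustrative sketch, not part of the upstream sarama sources):
+// prefer AddGroupProtocol/AddGroupProtocolMetadata over the deprecated
+// GroupProtocols map so the protocol preference order is preserved on the
+// wire. ConsumerGroupMemberMetadata is assumed to come from elsewhere in
+// this package:
+//
+//	req := &JoinGroupRequest{Version: 1, GroupId: "my-group",
+//		SessionTimeout: 30000, RebalanceTimeout: 60000, ProtocolType: "consumer"}
+//	err := req.AddGroupProtocolMetadata("range",
+//		&ConsumerGroupMemberMetadata{Topics: []string{"my-topic"}})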
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 0000000..5752acc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,135 @@
+package sarama
+
+type JoinGroupResponse struct {
+	Version       int16
+	ThrottleTime  int32
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+	for id, bin := range r.Members {
+		meta := new(ConsumerGroupMemberMetadata)
+		if err := decode(bin, meta); err != nil {
+			return nil, err
+		}
+		members[id] = *meta
+	}
+	return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putInt16(int16(r.Err))
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.GroupProtocol); err != nil {
+		return err
+	}
+	if err := pe.putString(r.LeaderId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Members)); err != nil {
+		return err
+	}
+
+	for memberId, memberMetadata := range r.Members {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+
+		if err := pe.putBytes(memberMetadata); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 2 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return
+		}
+	}
+
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if r.GroupProtocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.LeaderId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Members = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		memberMetadata, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.Members[memberId] = memberMetadata
+	}
+
+	return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+	return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	default:
+		return V0_9_0_0
+	}
+}
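+
+// Example (illustrative sketch, not part of the upstream sarama sources): the
+// elected group leader decodes the opaque member metadata via GetMembers
+// before computing a partition assignment:
+//
+//	members, err := resp.GetMembers()
+//	if err == nil {
+//		for id, meta := range members {
+//			fmt.Println(id, meta.Topics) // topics each member subscribed to
+//		}
+//	}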
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 0000000..e177427
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+	GroupId  string
+	MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+	return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 0000000..d60c626
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+	Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.Err = KError(kerr)
+
+	return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+	return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 0000000..da199a7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,82 @@
+package sarama
+
+import "encoding/binary"
+
+// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+	startOffset int
+	length      int32
+}
+
+func (l *lengthField) decode(pd packetDecoder) error {
+	var err error
+	l.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if l.length > int32(pd.remaining()) {
+		return ErrInsufficientData
+	}
+	return nil
+}
+
+func (l *lengthField) saveOffset(in int) {
+	l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+	return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+	return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+	if int32(curOffset-l.startOffset-4) != l.length {
+		return PacketDecodingError{"length field invalid"}
+	}
+
+	return nil
+}
+
+type varintLengthField struct {
+	startOffset int
+	length      int64
+}
+
+func (l *varintLengthField) decode(pd packetDecoder) error {
+	var err error
+	l.length, err = pd.getVarint()
+	return err
+}
+
+func (l *varintLengthField) saveOffset(in int) {
+	l.startOffset = in
+}
+
+func (l *varintLengthField) adjustLength(currOffset int) int {
+	oldFieldSize := l.reserveLength()
+	l.length = int64(currOffset - l.startOffset - oldFieldSize)
+
+	return l.reserveLength() - oldFieldSize
+}
+
+func (l *varintLengthField) reserveLength() int {
+	var tmp [binary.MaxVarintLen64]byte
+	return binary.PutVarint(tmp[:], l.length)
+}
+
+func (l *varintLengthField) run(curOffset int, buf []byte) error {
+	binary.PutVarint(buf[l.startOffset:], l.length)
+	return nil
+}
+
+func (l *varintLengthField) check(curOffset int, buf []byte) error {
+	if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
+		return PacketDecodingError{"length field invalid"}
+	}
+
+	return nil
+}
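+
+// Example (illustrative sketch, not part of the upstream sarama sources):
+// push/pop bracket a 4-byte length-prefixed region. The encoder reserves the
+// field at push time and run() back-fills it at pop time, as in
+// MessageBlock.encode:
+//
+//	pe.push(&lengthField{}) // reserve 4 bytes for the length
+//	// ... encode the framed payload ...
+//	err := pe.pop() // back-fill the payload size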
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 0000000..3b16abf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+	return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 0000000..56115d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+	Err    KError
+	Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+	for groupId, protocolType := range r.Groups {
+		if err := pe.putString(groupId); err != nil {
+			return err
+		}
+		if err := pe.putString(protocolType); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.Groups = make(map[string]string)
+	for i := 0; i < n; i++ {
+		groupId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		protocolType, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		r.Groups[groupId] = protocolType
+	}
+
+	return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+	return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+	return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 0000000..51d3309
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,157 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// The lowest 3 bits contain the compression codec used for the message
+const compressionCodecMask int8 = 0x07
+
+const (
+	CompressionNone   CompressionCodec = 0
+	CompressionGZIP   CompressionCodec = 1
+	CompressionSnappy CompressionCodec = 2
+	CompressionLZ4    CompressionCodec = 3
+	CompressionZSTD   CompressionCodec = 4
+)
+
+func (cc CompressionCodec) String() string {
+	return []string{
+		"none",
+		"gzip",
+		"snappy",
+		"lz4",
+		"zstd", // included so CompressionZSTD (4) does not index out of range
+	}[int(cc)]
+}
+
+// CompressionLevelDefault is the constant to use in CompressionLevel
+// to have the default compression level for any codec. The value is chosen
+// so that it does not collide with any existing compression level.
+const CompressionLevelDefault = -1000
+
+type Message struct {
+	Codec            CompressionCodec // codec used to compress the message contents
+	CompressionLevel int              // compression level
+	Key              []byte           // the message key, may be nil
+	Value            []byte           // the message contents
+	Set              *MessageSet      // the message set a message might wrap
+	Version          int8             // v1 requires Kafka 0.10
+	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
+
+	compressedCache []byte
+	compressedSize  int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+	pe.push(newCRC32Field(crcIEEE))
+
+	pe.putInt8(m.Version)
+
+	attributes := int8(m.Codec) & compressionCodecMask
+	pe.putInt8(attributes)
+
+	if m.Version >= 1 {
+		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+			return err
+		}
+	}
+
+	err := pe.putBytes(m.Key)
+	if err != nil {
+		return err
+	}
+
+	var payload []byte
+
+	if m.compressedCache != nil {
+		payload = m.compressedCache
+		m.compressedCache = nil
+	} else if m.Value != nil {
+		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
+		if err != nil {
+			return err
+		}
+		m.compressedCache = payload
+		// Keep in mind the compressed payload size for metric gathering
+		m.compressedSize = len(payload)
+	}
+
+	if err = pe.putBytes(payload); err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+	err = pd.push(newCRC32Field(crcIEEE))
+	if err != nil {
+		return err
+	}
+
+	m.Version, err = pd.getInt8()
+	if err != nil {
+		return err
+	}
+
+	if m.Version > 1 {
+		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+	}
+
+	attribute, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	m.Codec = CompressionCodec(attribute & compressionCodecMask)
+
+	if m.Version == 1 {
+		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+			return err
+		}
+	}
+
+	m.Key, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	m.Value, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	// Required for deep equal assertion during tests but might be useful
+	// for future metrics about the compression ratio in fetch requests
+	m.compressedSize = len(m.Value)
+
+	switch m.Codec {
+	case CompressionNone:
+		// nothing to do
+	default:
+		if m.Value == nil {
+			break
+		}
+
+		m.Value, err = decompress(m.Codec, m.Value)
+		if err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	}
+
+	return pd.pop()
+}
+
+// decodeSet decodes a message set from a previously encoded bulk message
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000..600c7c4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,108 @@
+package sarama
+
+type MessageBlock struct {
+	Offset int64
+	Msg    *Message
+}
+
+// Messages is a convenience helper which returns either all the
+// messages wrapped in this block's nested set, or the block itself
+// if it does not wrap a set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&lengthField{}); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	OverflowMessage        bool // whether the set on the wire contained an overflow message
+	Messages               []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		magic, err := magicValue(pd)
+		if err != nil {
+			if err == ErrInsufficientData {
+				ms.PartialTrailingMessage = true
+				return nil
+			}
+			return err
+		}
+
+		if magic > 1 {
+			return nil
+		}
+
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		switch err {
+		case nil:
+			ms.Messages = append(ms.Messages, msb)
+		case ErrInsufficientData:
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set. Clients should handle this case, so we mark the set
+			// accordingly and stop decoding.
+			if msb.Offset == -1 {
+				// This is an overflow message caused by chunked down conversion
+				ms.OverflowMessage = true
+			} else {
+				ms.PartialTrailingMessage = true
+			}
+			return nil
+		default:
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+	block := new(MessageBlock)
+	block.Msg = msg
+	ms.Messages = append(ms.Messages, block)
+}
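+
+// Example (illustrative sketch, not part of the upstream sarama sources):
+// assembling a legacy message set by hand, much as FetchResponse.AddMessage
+// does:
+//
+//	set := &MessageSet{}
+//	set.addMessage(&Message{Key: []byte("k"), Value: []byte("v")})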
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 0000000..17dc428
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,88 @@
+package sarama
+
+type MetadataRequest struct {
+	Version                int16
+	Topics                 []string
+	AllowAutoTopicCreation bool
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 5 {
+		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
+	}
+	if r.Version == 0 || len(r.Topics) > 0 {
+		err := pe.putArrayLength(len(r.Topics))
+		if err != nil {
+			return err
+		}
+
+		for i := range r.Topics {
+			err = pe.putString(r.Topics[i])
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		pe.putInt32(-1)
+	}
+	if r.Version > 3 {
+		pe.putBool(r.AllowAutoTopicCreation)
+	}
+	return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+	size, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if size <= 0 {
+		return nil
+	}
+
+	r.Topics = make([]string, size)
+	for i := range r.Topics {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Topics[i] = topic
+	}
+	if r.Version > 3 {
+		autoCreation, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.AllowAutoTopicCreation = autoCreation
+	}
+	return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+	return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_0_0
+	case 2:
+		return V0_10_1_0
+	case 3, 4:
+		return V0_11_0_0
+	case 5:
+		return V1_0_0_0
+	default:
+		return MinVersion
+	}
+}
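+
+// Example (illustrative sketch, not part of the upstream sarama sources): for
+// Version >= 1 an empty Topics slice is encoded as the -1 "null array" above
+// and requests metadata for all topics; listing topics narrows the response:
+//
+//	all := &MetadataRequest{Version: 5}
+//	one := &MetadataRequest{Version: 5, Topics: []string{"my-topic"}}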
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 0000000..c402d05
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,321 @@
+package sarama
+
+type PartitionMetadata struct {
+	Err             KError
+	ID              int32
+	Leader          int32
+	Replicas        []int32
+	Isr             []int32
+	OfflineReplicas []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	pm.Err = KError(tmp)
+
+	pm.ID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	pm.Leader, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	pm.Replicas, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	pm.Isr, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		pm.OfflineReplicas, err = pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(pm.Err))
+	pe.putInt32(pm.ID)
+	pe.putInt32(pm.Leader)
+
+	err = pe.putInt32Array(pm.Replicas)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putInt32Array(pm.Isr)
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		err = pe.putInt32Array(pm.OfflineReplicas)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type TopicMetadata struct {
+	Err        KError
+	Name       string
+	IsInternal bool // Only valid for Version >= 1
+	Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	tm.Err = KError(tmp)
+
+	tm.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		tm.IsInternal, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	tm.Partitions = make([]*PartitionMetadata, n)
+	for i := 0; i < n; i++ {
+		tm.Partitions[i] = new(PartitionMetadata)
+		err = tm.Partitions[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(tm.Err))
+
+	err = pe.putString(tm.Name)
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		pe.putBool(tm.IsInternal)
+	}
+
+	err = pe.putArrayLength(len(tm.Partitions))
+	if err != nil {
+		return err
+	}
+
+	for _, pm := range tm.Partitions {
+		err = pm.encode(pe, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type MetadataResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Brokers        []*Broker
+	ClusterID      *string
+	ControllerID   int32
+	Topics         []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Brokers = make([]*Broker, n)
+	for i := 0; i < n; i++ {
+		r.Brokers[i] = new(Broker)
+		err = r.Brokers[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if version >= 2 {
+		r.ClusterID, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	if version >= 1 {
+		r.ControllerID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		r.ControllerID = -1
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]*TopicMetadata, n)
+	for i := 0; i < n; i++ {
+		r.Topics[i] = new(TopicMetadata)
+		err = r.Topics[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	err := pe.putArrayLength(len(r.Brokers))
+	if err != nil {
+		return err
+	}
+	for _, broker := range r.Brokers {
+		err = broker.encode(pe, r.Version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		err := pe.putNullableString(r.ClusterID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ControllerID)
+	}
+
+	err = pe.putArrayLength(len(r.Topics))
+	if err != nil {
+		return err
+	}
+	for _, tm := range r.Topics {
+		err = tm.encode(pe, r.Version)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+	return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_0_0
+	case 2:
+		return V0_10_1_0
+	case 3, 4:
+		return V0_11_0_0
+	case 5:
+		return V1_0_0_0
+	default:
+		return MinVersion
+	}
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+	var tmatch *TopicMetadata
+
+	for _, tm := range r.Topics {
+		if tm.Name == topic {
+			tmatch = tm
+			goto foundTopic
+		}
+	}
+
+	tmatch = new(TopicMetadata)
+	tmatch.Name = topic
+	r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+	tmatch.Err = err
+	return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+	tmatch := r.AddTopic(topic, ErrNoError)
+	var pmatch *PartitionMetadata
+
+	for _, pm := range tmatch.Partitions {
+		if pm.ID == partition {
+			pmatch = pm
+			goto foundPartition
+		}
+	}
+
+	pmatch = new(PartitionMetadata)
+	pmatch.ID = partition
+	tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+	pmatch.Leader = brokerID
+	pmatch.Replicas = replicas
+	pmatch.Isr = isr
+	pmatch.Err = err
+}
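+
+// Example (illustrative sketch, not part of the upstream sarama sources): the
+// testing helpers above compose a response incrementally; broker IDs and
+// addresses are placeholders:
+//
+//	res := &MetadataResponse{Version: 1}
+//	res.AddBroker("localhost:9092", 0)
+//	res.AddTopicPartition("my-topic", 0, 0, []int32{0}, []int32{0}, ErrNoError)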
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 0000000..4869708
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+	metricsReservoirSize = 1028
+	metricsAlphaFactor   = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+	return r.GetOrRegister(name, func() metrics.Histogram {
+		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+	}).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+	// Use the broker ID like the Java client does, as it contains no '.' or ':'
+	// characters that could be interpreted as special characters by a monitoring tool (e.g. Graphite)
+	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
+}
+
+func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+	// cf. KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
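+
+// Example (illustrative sketch, not part of the upstream sarama sources):
+// these helpers key metrics by broker or topic so reporters such as Graphite
+// see flat, dot-free names. metrics.DefaultRegistry is assumed from the
+// go-metrics library:
+//
+//	h := getOrRegisterTopicHistogram("batch-size", "my.topic", metrics.DefaultRegistry)
+//	h.Update(512) // recorded as "batch-size-for-topic-my_topic"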
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 0000000..55ef1e2
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,330 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request
+// successfully; it provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, leaving it to the
+// test writer to program MockBroker behaviour that is correct according to
+// the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID     int32
+	port         int32
+	closing      chan none
+	stopper      chan none
+	expectations chan encoder
+	listener     net.Listener
+	t            TestReporter
+	latency      time.Duration
+	handler      requestHandlerFunc
+	notifier     RequestNotifierFunc
+	history      []RequestResponse
+	lock         sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes the broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoder) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully; it will provide the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+	return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+	close(b.expectations)
+	if len(b.expectations) > 0 {
+		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+		for e := range b.expectations {
+			_, _ = buf.WriteString(spew.Sdump(e))
+		}
+		b.t.Error(buf.String())
+	}
+	close(b.closing)
+	<-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+	b.lock.Lock()
+	b.handler = handler
+	b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+	defer close(b.stopper)
+	var err error
+	var conn net.Conn
+
+	go func() {
+		<-b.closing
+		err := b.listener.Close()
+		if err != nil {
+			b.t.Error(err)
+		}
+	}()
+
+	wg := &sync.WaitGroup{}
+	i := 0
+	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+		wg.Add(1)
+		go b.handleRequests(conn, i, wg)
+		i++
+	}
+	wg.Wait()
+	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	resHeader := make([]byte, 8)
+	for {
+		req, bytesRead, err := decodeRequest(conn)
+		if err != nil {
+			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+			b.serverError(err)
+			break
+		}
+
+		if b.latency > 0 {
+			time.Sleep(b.latency)
+		}
+
+		b.lock.Lock()
+		res := b.handler(req)
+		b.history = append(b.history, RequestResponse{req.body, res})
+		b.lock.Unlock()
+
+		if res == nil {
+			Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+			continue
+		}
+		Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+		encodedRes, err := encode(res, nil)
+		if err != nil {
+			b.serverError(err)
+			break
+		}
+		if len(encodedRes) == 0 {
+			b.lock.Lock()
+			if b.notifier != nil {
+				b.notifier(bytesRead, 0)
+			}
+			b.lock.Unlock()
+			continue
+		}
+
+		binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+		binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+		if _, err = conn.Write(resHeader); err != nil {
+			b.serverError(err)
+			break
+		}
+		if _, err = conn.Write(encodedRes); err != nil {
+			b.serverError(err)
+			break
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, len(resHeader)+len(encodedRes))
+		}
+		b.lock.Unlock()
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func (b *MockBroker) serverError(err error) {
+	isConnectionClosedError := false
+	if _, ok := err.(*net.OpError); ok {
+		isConnectionClosedError = true
+	} else if err == io.EOF {
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Error(err)
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as
+// provided by the test framework and the broker ID to assign. If an error
+// occurs it is simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	listener, err := net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoder, 512),
+		listener:     listener,
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+	b.expectations <- e
+}
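+
+// Example (illustrative sketch, not part of the upstream sarama sources): a
+// minimal test wiring, assuming t is a *testing.T (which satisfies
+// TestReporter):
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()),
+//	})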
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 0000000..fe55200
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,727 @@
+package sarama
+
+import (
+	"fmt"
+)
+
+// TestReporter has methods matching Go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface: it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program the behavior of a MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Each time a `MockBroker` calls its `For` method, the
+// next response from the sequence is returned. When the end of the sequence
+// is reached, the last element is returned on every subsequent call.
+type MockSequence struct {
+	responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+	ms := &MockSequence{}
+	ms.responses = make([]MockResponse, len(responses))
+	for i, res := range responses {
+		switch res := res.(type) {
+		case MockResponse:
+			ms.responses[i] = res
+		case encoder:
+			ms.responses[i] = NewMockWrapper(res)
+		default:
+			panic(fmt.Sprintf("Unexpected response type: %T", res))
+		}
+	}
+	return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+	res = mc.responses[0].For(reqBody)
+	if len(mc.responses) > 1 {
+		mc.responses = mc.responses[1:]
+	}
+	return res
+}
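+
+// Example (illustrative sketch): a sequence mixing a concrete response and a
+// MockResponse builder. The first call to For serves the MetadataResponse;
+// every later call serves the wrapped OffsetResponse, since the last element
+// repeats once the sequence is exhausted.
+//
+//	seq := NewMockSequence(
+//		new(MetadataResponse),
+//		NewMockWrapper(new(OffsetResponse)),
+//	)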
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+	controllerID int32
+	leaders      map[string]map[int32]int32
+	brokers      map[string]int32
+	t            TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+	return &MockMetadataResponse{
+		leaders: make(map[string]map[int32]int32),
+		brokers: make(map[string]int32),
+		t:       t,
+	}
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+	partitions := mmr.leaders[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int32)
+		mmr.leaders[topic] = partitions
+	}
+	partitions[partition] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+	mmr.brokers[addr] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
+	mmr.controllerID = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+	metadataRequest := reqBody.(*MetadataRequest)
+	metadataResponse := &MetadataResponse{
+		Version:      metadataRequest.version(),
+		ControllerID: mmr.controllerID,
+	}
+	for addr, brokerID := range mmr.brokers {
+		metadataResponse.AddBroker(addr, brokerID)
+	}
+	if len(metadataRequest.Topics) == 0 {
+		for topic, partitions := range mmr.leaders {
+			for partition, brokerID := range partitions {
+				metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+			}
+		}
+		return metadataResponse
+	}
+	for _, topic := range metadataRequest.Topics {
+		for partition, brokerID := range mmr.leaders[topic] {
+			metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+		}
+	}
+	return metadataResponse
+}
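+
+// Example (illustrative sketch): metadata for one hypothetical topic with two
+// partitions led by two brokers; the topic name and addresses are made up.
+//
+//	metadata := NewMockMetadataResponse(t).
+//		SetBroker("localhost:9092", 1).
+//		SetBroker("localhost:9093", 2).
+//		SetLeader("my_topic", 0, 1).
+//		SetLeader("my_topic", 1, 2).
+//		SetController(1)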
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+	offsets map[string]map[int32]map[int64]int64
+	t       TestReporter
+	version int16
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+	return &MockOffsetResponse{
+		offsets: make(map[string]map[int32]map[int64]int64),
+		t:       t,
+	}
+}
+
+func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
+	mor.version = version
+	return mor
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]int64)
+		mor.offsets[topic] = partitions
+	}
+	times := partitions[partition]
+	if times == nil {
+		times = make(map[int64]int64)
+		partitions[partition] = times
+	}
+	times[time] = offset
+	return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+	offsetRequest := reqBody.(*OffsetRequest)
+	offsetResponse := &OffsetResponse{Version: mor.version}
+	for topic, partitions := range offsetRequest.blocks {
+		for partition, block := range partitions {
+			offset := mor.getOffset(topic, partition, block.time)
+			offsetResponse.AddTopicPartition(topic, partition, offset)
+		}
+	}
+	return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		mor.t.Errorf("missing topic: %s", topic)
+	}
+	times := partitions[partition]
+	if times == nil {
+		mor.t.Errorf("missing partition: %d", partition)
+	}
+	offset, ok := times[time]
+	if !ok {
+		mor.t.Errorf("missing time: %d", time)
+	}
+	return offset
+}
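+
+// Example (illustrative sketch): mapping the package's special time values
+// (OffsetNewest and OffsetOldest, defined elsewhere in the package) to
+// concrete offsets for a hypothetical topic/partition.
+//
+//	offsets := NewMockOffsetResponse(t).
+//		SetOffset("my_topic", 0, OffsetOldest, 0).
+//		SetOffset("my_topic", 0, OffsetNewest, 10)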
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+	messages       map[string]map[int32]map[int64]Encoder
+	highWaterMarks map[string]map[int32]int64
+	t              TestReporter
+	batchSize      int
+	version        int16
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+	return &MockFetchResponse{
+		messages:       make(map[string]map[int32]map[int64]Encoder),
+		highWaterMarks: make(map[string]map[int32]int64),
+		t:              t,
+		batchSize:      batchSize,
+	}
+}
+
+func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
+	mfr.version = version
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]Encoder)
+		mfr.messages[topic] = partitions
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		messages = make(map[int64]Encoder)
+		partitions[partition] = messages
+	}
+	messages[offset] = msg
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int64)
+		mfr.highWaterMarks[topic] = partitions
+	}
+	partitions[partition] = offset
+	return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+	fetchRequest := reqBody.(*FetchRequest)
+	res := &FetchResponse{
+		Version: mfr.version,
+	}
+	for topic, partitions := range fetchRequest.blocks {
+		for partition, block := range partitions {
+			initialOffset := block.fetchOffset
+			offset := initialOffset
+			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+			for i := 0; i < mfr.batchSize && offset < maxOffset; {
+				msg := mfr.getMessage(topic, partition, offset)
+				if msg != nil {
+					res.AddMessage(topic, partition, nil, msg, offset)
+					i++
+				}
+				offset++
+			}
+			fb := res.GetBlock(topic, partition)
+			if fb == nil {
+				res.AddError(topic, partition, ErrNoError)
+				fb = res.GetBlock(topic, partition)
+			}
+			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+		}
+	}
+	return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return nil
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return nil
+	}
+	return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return 0
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return 0
+	}
+	return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		return 0
+	}
+	return partitions[partition]
+}
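+
+// Example (illustrative sketch): two messages for partition 0 of a
+// hypothetical topic, served one per fetch because batchSize is 1.
+// StringEncoder is the package's trivial Encoder implementation.
+//
+//	fetch := NewMockFetchResponse(t, 1).
+//		SetMessage("my_topic", 0, 0, StringEncoder("foo")).
+//		SetMessage("my_topic", 0, 1, StringEncoder("bar")).
+//		SetHighWaterMark("my_topic", 0, 2)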
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+	coordinators map[string]interface{}
+	t            TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+	return &MockConsumerMetadataResponse{
+		coordinators: make(map[string]interface{}),
+		t:            t,
+	}
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = broker
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = kerror
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*ConsumerMetadataRequest)
+	group := req.ConsumerGroup
+	res := &ConsumerMetadataResponse{}
+	v := mr.coordinators[group]
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
+type MockFindCoordinatorResponse struct {
+	groupCoordinators map[string]interface{}
+	transCoordinators map[string]interface{}
+	t                 TestReporter
+}
+
+func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
+	return &MockFindCoordinatorResponse{
+		groupCoordinators: make(map[string]interface{}),
+		transCoordinators: make(map[string]interface{}),
+		t:                 t,
+	}
+}
+
+func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = broker
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = broker
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = kerror
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = kerror
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*FindCoordinatorRequest)
+	res := &FindCoordinatorResponse{}
+	var v interface{}
+	switch req.CoordinatorType {
+	case CoordinatorGroup:
+		v = mr.groupCoordinators[req.CoordinatorKey]
+	case CoordinatorTransaction:
+		v = mr.transCoordinators[req.CoordinatorKey]
+	}
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+	errors map[string]map[string]map[int32]KError
+	t      TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+	return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[string]map[int32]KError)
+	}
+	topics := mr.errors[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]KError)
+		mr.errors[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		topics[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*OffsetCommitRequest)
+	group := req.ConsumerGroup
+	res := &OffsetCommitResponse{}
+	for topic, partitions := range req.blocks {
+		for partition := range partitions {
+			res.AddError(topic, partition, mr.getError(group, topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+	topics := mr.errors[group]
+	if topics == nil {
+		return ErrNoError
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+	version int16
+	errors  map[string]map[int32]KError
+	t       TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+	return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+	mr.version = version
+	return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[int32]KError)
+	}
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		mr.errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*ProduceRequest)
+	res := &ProduceResponse{
+		Version: mr.version,
+	}
+	for topic, partitions := range req.records {
+		for partition := range partitions {
+			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+	t       TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+	return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+	if mr.offsets == nil {
+		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	topics := mr.offsets[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+		mr.offsets[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		topics[topic] = partitions
+	}
+	partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*OffsetFetchRequest)
+	group := req.ConsumerGroup
+	res := &OffsetFetchResponse{}
+	for topic, partitions := range mr.offsets[group] {
+		for partition, block := range partitions {
+			res.AddBlock(topic, partition, block)
+		}
+	}
+	return res
+}
+
+type MockCreateTopicsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
+	return &MockCreateTopicsResponse{t: t}
+}
+
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*CreateTopicsRequest)
+	res := &CreateTopicsResponse{}
+	res.TopicErrors = make(map[string]*TopicError)
+
+	for topic := range req.TopicDetails {
+		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockDeleteTopicsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
+	return &MockDeleteTopicsResponse{t: t}
+}
+
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DeleteTopicsRequest)
+	res := &DeleteTopicsResponse{}
+	res.TopicErrorCodes = make(map[string]KError)
+
+	for _, topic := range req.Topics {
+		res.TopicErrorCodes[topic] = ErrNoError
+	}
+	return res
+}
+
+type MockCreatePartitionsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
+	return &MockCreatePartitionsResponse{t: t}
+}
+
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*CreatePartitionsRequest)
+	res := &CreatePartitionsResponse{}
+	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
+
+	for topic := range req.TopicPartitions {
+		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockDeleteRecordsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
+	return &MockDeleteRecordsResponse{t: t}
+}
+
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DeleteRecordsRequest)
+	res := &DeleteRecordsResponse{}
+	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
+
+	for topic, deleteRecordRequestTopic := range req.Topics {
+		partitions := make(map[int32]*DeleteRecordsResponsePartition)
+		for partition := range deleteRecordRequestTopic.PartitionOffsets {
+			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
+		}
+		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
+	}
+	return res
+}
+
+type MockDescribeConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
+	return &MockDescribeConfigsResponse{t: t}
+}
+
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{}
+
+	var configEntries []*ConfigEntry
+	configEntries = append(configEntries, &ConfigEntry{Name: "my_topic",
+		Value:     "my_topic",
+		ReadOnly:  true,
+		Default:   true,
+		Sensitive: false,
+	})
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &ResourceResponse{Name: r.Name, Configs: configEntries})
+	}
+	return res
+}
+
+type MockAlterConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
+	return &MockAlterConfigsResponse{t: t}
+}
+
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{Name: r.Name,
+			Type:     TopicResource,
+			ErrorMsg: "",
+		})
+	}
+	return res
+}
+
+type MockCreateAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
+	return &MockCreateAclsResponse{t: t}
+}
+
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*CreateAclsRequest)
+	res := &CreateAclsResponse{}
+
+	for range req.AclCreations {
+		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
+	}
+	return res
+}
+
+type MockListAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
+	return &MockListAclsResponse{t: t}
+}
+
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DescribeAclsRequest)
+	res := &DescribeAclsResponse{}
+
+	res.Err = ErrNoError
+	acl := &ResourceAcls{}
+	acl.Resource.ResourceName = *req.ResourceName
+	acl.Resource.ResourceType = req.ResourceType
+	acl.Acls = append(acl.Acls, &Acl{})
+	res.ResourceAcls = append(res.ResourceAcls, acl)
+
+	return res
+}
+
+type MockDeleteAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
+	return &MockDeleteAclsResponse{t: t}
+}
+
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoder {
+	req := reqBody.(*DeleteAclsRequest)
+	res := &DeleteAclsResponse{}
+
+	for range req.Filters {
+		response := &FilterResponse{Err: ErrNoError}
+		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
+		res.FilterResponses = append(res.FilterResponses, response)
+	}
+	return res
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 0000000..1ec583e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,210 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+	offset    int64
+	timestamp int64
+	metadata  string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(b.offset)
+	if version == 1 {
+		pe.putInt64(b.timestamp)
+	} else if b.timestamp != 0 {
+		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+	}
+
+	return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 1 {
+		if b.timestamp, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	b.metadata, err = pd.getString()
+	return err
+}
+
+type OffsetCommitRequest struct {
+	ConsumerGroup           string
+	ConsumerGroupGeneration int32  // v1 or later
+	ConsumerID              string // v1 or later
+	RetentionTime           int64  // v2 or later
+
+	// Version can be:
+	// - 0 (kafka 0.8.1 and later)
+	// - 1 (kafka 0.8.2 and later)
+	// - 2 (kafka 0.9.0 and later)
+	// - 3 (kafka 0.11.0 and later)
+	// - 4 (kafka 2.0.0 and later)
+	Version int16
+	blocks  map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 4 {
+		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+	}
+
+	if err := pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ConsumerGroupGeneration)
+		if err := pe.putString(r.ConsumerID); err != nil {
+			return err
+		}
+	} else {
+		if r.ConsumerGroupGeneration != 0 {
+			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+		}
+		if r.ConsumerID != "" {
+			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+		}
+	}
+
+	if r.Version >= 2 {
+		pe.putInt64(r.RetentionTime)
+	} else if r.RetentionTime != 0 {
+		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+	}
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.ConsumerID, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		if r.RetentionTime, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetCommitRequestBlock{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+	return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_9_0_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+	}
+
+	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
+
+func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
+	partitions := r.blocks[topic]
+	if partitions == nil {
+		return 0, "", errors.New("No such offset")
+	}
+	block := partitions[partitionID]
+	if block == nil {
+		return 0, "", errors.New("No such offset")
+	}
+	return block.offset, block.metadata, nil
+}
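+
+// Example (illustrative sketch): committing offset 42 for partition 0 of a
+// hypothetical topic with a v2 request, then reading the block back.
+//
+//	req := &OffsetCommitRequest{
+//		Version:       2,
+//		ConsumerGroup: "my_group",
+//		RetentionTime: int64(24 * time.Hour / time.Millisecond),
+//	}
+//	req.AddBlock("my_topic", 0, 42, 0, "")
+//	offset, metadata, err := req.Offset("my_topic", 0) // 42, "", nil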
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 0000000..e842298
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+type OffsetCommitResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Errors         map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, kerror := range partitions {
+			pe.putInt32(partition)
+			pe.putInt16(int16(kerror))
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			tmp, err := pd.getInt16()
+			if err != nil {
+				return err
+			}
+			r.Errors[name][id] = KError(tmp)
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+	return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_9_0_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	default:
+		return MinVersion
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..6860824
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type OffsetFetchRequest struct {
+	Version       int16
+	ConsumerGroup string
+	partitions    map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 5 {
+		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+	}
+
+	if err = pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 2 && r.partitions == nil {
+		pe.putInt32(-1)
+	} else {
+		if err = pe.putArrayLength(len(r.partitions)); err != nil {
+			return err
+		}
+		for topic, partitions := range r.partitions {
+			if err = pe.putString(topic); err != nil {
+				return err
+			}
+			if err = pe.putInt32Array(partitions); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+	partitionCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+		return nil
+	}
+	r.partitions = make(map[string][]int32)
+	for i := 0; i < partitionCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		r.partitions[topic] = partitions
+	}
+	return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+	return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_10_2_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	case 5:
+		return V2_1_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetFetchRequest) ZeroPartitions() {
+	if r.partitions == nil && r.Version >= 2 {
+		r.partitions = make(map[string][]int32)
+	}
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
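+
+// Example (illustrative sketch): fetching the committed offset of a single
+// partition for a hypothetical group and topic. With Version >= 2 a nil
+// partition set encodes as -1 (all topics), while ZeroPartitions pins the
+// request to an explicitly empty set.
+//
+//	req := &OffsetFetchRequest{Version: 1, ConsumerGroup: "my_group"}
+//	req.AddPartition("my_topic", 0)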
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..9e25702
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,197 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+	Offset      int64
+	LeaderEpoch int32
+	Metadata    string
+	Err         KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		b.LeaderEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	b.Metadata, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt64(b.Offset)
+
+	if version >= 5 {
+		pe.putInt32(b.LeaderEpoch)
+	}
+
+	err = pe.putString(b.Metadata)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt16(int16(b.Err))
+
+	return nil
+}
+
+type OffsetFetchResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetFetchResponseBlock
+	Err            KError
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	if r.Version >= 2 {
+		pe.putInt16(int16(r.Err))
+	}
+	return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			numBlocks, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			if numBlocks == 0 {
+				r.Blocks[name] = nil
+				continue
+			}
+			r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+			for j := 0; j < numBlocks; j++ {
+				id, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+
+				block := new(OffsetFetchResponseBlock)
+				err = block.decode(pd, version)
+				if err != nil {
+					return err
+				}
+				r.Blocks[name][id] = block
+			}
+		}
+	}
+
+	if version >= 2 {
+		kerr, err := pd.getInt16()
+		if err != nil {
+			return err
+		}
+		r.Err = KError(kerr)
+	}
+
+	return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+	return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_8_2_0
+	case 2:
+		return V0_10_2_0
+	case 3:
+		return V0_11_0_0
+	case 4:
+		return V2_0_0_0
+	case 5:
+		return V2_1_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	partitions := r.Blocks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	partitions[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 0000000..8ea857f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,572 @@
+package sarama
+
+import (
+	"sync"
+	"time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+	// It will return an error if this OffsetManager is already managing the given
+	// topic/partition.
+	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+	// Close stops the OffsetManager from managing offsets. It is required to call
+	// this function before an OffsetManager object passes out of scope, as it
+	// will otherwise leak memory. You must call this after all the
+	// PartitionOffsetManagers are closed.
+	Close() error
+}
+
+type offsetManager struct {
+	client Client
+	conf   *Config
+	group  string
+	ticker *time.Ticker
+
+	memberID   string
+	generation int32
+
+	broker     *Broker
+	brokerLock sync.RWMutex
+
+	poms     map[string]map[int32]*partitionOffsetManager
+	pomsLock sync.RWMutex
+
+	closeOnce sync.Once
+	closing   chan none
+	closed    chan none
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+	return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
+}
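+
+// Example (illustrative sketch): typical lifecycle, assuming a Client created
+// elsewhere. Per the interface comments above, PartitionOffsetManagers are
+// closed first, then the OffsetManager, then the underlying client.
+//
+//	om, err := NewOffsetManagerFromClient("my_group", client)
+//	if err != nil {
+//		return err
+//	}
+//	pom, err := om.ManagePartition("my_topic", 0)
+//	if err != nil {
+//		return err
+//	}
+//	// ... consume messages, calling pom.MarkOffset as you go ...
+//	_ = pom.Close()
+//	_ = om.Close()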
+
+func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	conf := client.Config()
+	om := &offsetManager{
+		client: client,
+		conf:   conf,
+		group:  group,
+		ticker: time.NewTicker(conf.Consumer.Offsets.CommitInterval),
+		poms:   make(map[string]map[int32]*partitionOffsetManager),
+
+		memberID:   memberID,
+		generation: generation,
+
+		closing: make(chan none),
+		closed:  make(chan none),
+	}
+	go withRecover(om.mainLoop)
+
+	return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+	pom, err := om.newPartitionOffsetManager(topic, partition)
+	if err != nil {
+		return nil, err
+	}
+
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	topicManagers := om.poms[topic]
+	if topicManagers == nil {
+		topicManagers = make(map[int32]*partitionOffsetManager)
+		om.poms[topic] = topicManagers
+	}
+
+	if topicManagers[partition] != nil {
+		return nil, ConfigurationError("That topic/partition is already being managed")
+	}
+
+	topicManagers[partition] = pom
+	return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+	om.closeOnce.Do(func() {
+		// exit the mainLoop
+		close(om.closing)
+		<-om.closed
+
+		// mark all POMs as closed
+		om.asyncClosePOMs()
+
+		// flush one last time
+		for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+			om.flushToBroker()
+			if om.releasePOMs(false) == 0 {
+				break
+			}
+		}
+
+		om.releasePOMs(true)
+		om.brokerLock.Lock()
+		om.broker = nil
+		om.brokerLock.Unlock()
+	})
+	return nil
+}
+
+func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
+	broker, err := om.coordinator()
+	if err != nil {
+		if retries <= 0 {
+			return 0, "", err
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	req := new(OffsetFetchRequest)
+	req.Version = 1
+	req.ConsumerGroup = om.group
+	req.AddPartition(topic, partition)
+
+	resp, err := broker.FetchOffset(req)
+	if err != nil {
+		if retries <= 0 {
+			return 0, "", err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	block := resp.GetBlock(topic, partition)
+	if block == nil {
+		return 0, "", ErrIncompleteResponse
+	}
+
+	switch block.Err {
+	case ErrNoError:
+		return block.Offset, block.Metadata, nil
+	case ErrNotCoordinatorForConsumer:
+		if retries <= 0 {
+			return 0, "", block.Err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	case ErrOffsetsLoadInProgress:
+		if retries <= 0 {
+			return 0, "", block.Err
+		}
+		select {
+		case <-om.closing:
+			return 0, "", block.Err
+		case <-time.After(om.conf.Metadata.Retry.Backoff):
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	default:
+		return 0, "", block.Err
+	}
+}
+
+func (om *offsetManager) coordinator() (*Broker, error) {
+	om.brokerLock.RLock()
+	broker := om.broker
+	om.brokerLock.RUnlock()
+
+	if broker != nil {
+		return broker, nil
+	}
+
+	om.brokerLock.Lock()
+	defer om.brokerLock.Unlock()
+
+	if broker := om.broker; broker != nil {
+		return broker, nil
+	}
+
+	if err := om.client.RefreshCoordinator(om.group); err != nil {
+		return nil, err
+	}
+
+	broker, err := om.client.Coordinator(om.group)
+	if err != nil {
+		return nil, err
+	}
+
+	om.broker = broker
+	return broker, nil
+}
+
+func (om *offsetManager) releaseCoordinator(b *Broker) {
+	om.brokerLock.Lock()
+	if om.broker == b {
+		om.broker = nil
+	}
+	om.brokerLock.Unlock()
+}
+
+func (om *offsetManager) mainLoop() {
+	defer om.ticker.Stop()
+	defer close(om.closed)
+
+	for {
+		select {
+		case <-om.ticker.C:
+			om.flushToBroker()
+			om.releasePOMs(false)
+		case <-om.closing:
+			return
+		}
+	}
+}
+
+func (om *offsetManager) flushToBroker() {
+	req := om.constructRequest()
+	if req == nil {
+		return
+	}
+
+	broker, err := om.coordinator()
+	if err != nil {
+		om.handleError(err)
+		return
+	}
+
+	resp, err := broker.CommitOffset(req)
+	if err != nil {
+		om.handleError(err)
+		om.releaseCoordinator(broker)
+		_ = broker.Close()
+		return
+	}
+
+	om.handleResponse(broker, req, resp)
+}
+
+func (om *offsetManager) constructRequest() *OffsetCommitRequest {
+	var r *OffsetCommitRequest
+	var perPartitionTimestamp int64
+	if om.conf.Consumer.Offsets.Retention == 0 {
+		perPartitionTimestamp = ReceiveTime
+		r = &OffsetCommitRequest{
+			Version:                 1,
+			ConsumerGroup:           om.group,
+			ConsumerID:              om.memberID,
+			ConsumerGroupGeneration: om.generation,
+		}
+	} else {
+		r = &OffsetCommitRequest{
+			Version:                 2,
+			RetentionTime:           int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
+			ConsumerGroup:           om.group,
+			ConsumerID:              om.memberID,
+			ConsumerGroupGeneration: om.generation,
+		}
+	}
+
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.lock.Lock()
+			if pom.dirty {
+				r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
+			}
+			pom.lock.Unlock()
+		}
+	}
+
+	if len(r.blocks) > 0 {
+		return r
+	}
+
+	return nil
+}
+
+func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
+				continue
+			}
+
+			var err KError
+			var ok bool
+
+			if resp.Errors[pom.topic] == nil {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+			if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+
+			switch err {
+			case ErrNoError:
+				block := req.blocks[pom.topic][pom.partition]
+				pom.updateCommitted(block.offset, block.metadata)
+			case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+				ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+				// not a critical error, we just need to redispatch
+				om.releaseCoordinator(broker)
+			case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+				// nothing we can do about this, just tell the user and carry on
+				pom.handleError(err)
+			case ErrOffsetsLoadInProgress:
+				// nothing wrong but we didn't commit, we'll get it next time round
+				break
+			case ErrUnknownTopicOrPartition:
+				// let the user know *and* try redispatching - if topic-auto-create is
+				// enabled, redispatching should trigger a metadata request and create the
+				// topic; if not then redispatching won't help, but we've let the user
+				// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+				fallthrough
+			default:
+				// dunno, tell the user and try redispatching
+				pom.handleError(err)
+				om.releaseCoordinator(broker)
+			}
+		}
+	}
+}
+
+func (om *offsetManager) handleError(err error) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.handleError(err)
+		}
+	}
+}
+
+func (om *offsetManager) asyncClosePOMs() {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.AsyncClose()
+		}
+	}
+}
+
+// Releases/removes closed POMs once they are clean (or when forced)
+func (om *offsetManager) releasePOMs(force bool) (remaining int) {
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	for topic, topicManagers := range om.poms {
+		for partition, pom := range topicManagers {
+			pom.lock.Lock()
+			releaseDue := pom.done && (force || !pom.dirty)
+			pom.lock.Unlock()
+
+			if releaseDue {
+				pom.release()
+
+				delete(om.poms[topic], partition)
+				if len(om.poms[topic]) == 0 {
+					delete(om.poms, topic)
+				}
+			}
+		}
+		remaining += len(om.poms[topic])
+	}
+	return
+}
+
+func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	if partitions, ok := om.poms[topic]; ok {
+		if pom, ok := partitions[partition]; ok {
+			return pom
+		}
+	}
+	return nil
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it
+// passes out of scope.
+type PartitionOffsetManager interface {
+	// NextOffset returns the next offset that should be consumed for the managed
+	// partition, accompanied by metadata which can be used to reconstruct the state
+	// of the partition consumer when it resumes. NextOffset() will return
+	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+	// was committed for this partition yet.
+	NextOffset() (int64, string)
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, where MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
+	// Errors returns a read channel of errors that occur during offset management, if
+	// enabled. By default, errors are logged and not returned over this channel. If
+	// you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+	// return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function, or Close, before
+	// a consumer object passes out of scope, as it will otherwise leak memory. You
+	// must call this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionOffsetManager from managing offsets. It is required to
+	// call this function (or AsyncClose) before a PartitionOffsetManager object
+	// passes out of scope, as it will otherwise leak memory. You must call this
+	// before calling Close on the underlying client.
+	Close() error
+}
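+
+// Example (illustrative sketch): the "mark the next offset to read"
+// convention described on MarkOffset above, for a hypothetical consumed
+// *ConsumerMessage msg.
+//
+//	pom.MarkOffset(msg.Offset+1, "")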
+
+type partitionOffsetManager struct {
+	parent    *offsetManager
+	topic     string
+	partition int32
+
+	lock     sync.Mutex
+	offset   int64
+	metadata string
+	dirty    bool
+	done     bool
+
+	releaseOnce sync.Once
+	errors      chan *ConsumerError
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+	offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
+	if err != nil {
+		return nil, err
+	}
+
+	return &partitionOffsetManager{
+		parent:    om,
+		topic:     topic,
+		partition: partition,
+		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
+		offset:    offset,
+		metadata:  metadata,
+	}, nil
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+	return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset > pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset <= pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset == offset && pom.metadata == metadata {
+		pom.dirty = false
+	}
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset >= 0 {
+		return pom.offset, pom.metadata
+	}
+
+	return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+	pom.lock.Lock()
+	pom.done = true
+	pom.lock.Unlock()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+	pom.AsyncClose()
+
+	var errors ConsumerErrors
+	for err := range pom.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+	cErr := &ConsumerError{
+		Topic:     pom.topic,
+		Partition: pom.partition,
+		Err:       err,
+	}
+
+	if pom.parent.conf.Consumer.Return.Errors {
+		pom.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (pom *partitionOffsetManager) release() {
+	pom.releaseOnce.Do(func() {
+		go close(pom.errors)
+	})
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 0000000..326c372
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,156 @@
+package sarama
+
+type offsetRequestBlock struct {
+	time       int64
+	maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(int64(b.time))
+	if version == 0 {
+		pe.putInt32(b.maxOffsets)
+	}
+
+	return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.time, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 0 {
+		if b.maxOffsets, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type OffsetRequest struct {
+	Version        int16
+	replicaID      int32
+	isReplicaIDSet bool
+	blocks         map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+	if r.isReplicaIDSet {
+		pe.putInt32(r.replicaID)
+	} else {
+		// default replica ID is always -1 for clients
+		pe.putInt32(-1)
+	}
+
+	err := pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	replicaID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if replicaID >= 0 {
+		r.SetReplicaID(replicaID)
+	}
+
+	blockCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if blockCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	for i := 0; i < blockCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetRequestBlock{}
+			if err := block.decode(pd, version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+	return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_1_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *OffsetRequest) SetReplicaID(id int32) {
+	r.replicaID = id
+	r.isReplicaIDSet = true
+}
+
+func (r *OffsetRequest) ReplicaID() int32 {
+	if r.isReplicaIDSet {
+		return r.replicaID
+	}
+	return -1
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+	}
+
+	tmp := new(offsetRequestBlock)
+	tmp.time = time
+	if r.Version == 0 {
+		tmp.maxOffsets = maxOffsets
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
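+
+// Example (illustrative sketch): a v0 request for the newest offset of a
+// hypothetical topic/partition. OffsetNewest and OffsetOldest are the special
+// time values defined elsewhere in the package; maxOffsets only matters for
+// version 0.
+//
+//	req := &OffsetRequest{}
+//	req.AddBlock("my_topic", 0, OffsetNewest, 1)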
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 0000000..8b2193f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+	Err       KError
+	Offsets   []int64 // Version 0
+	Offset    int64   // Version 1
+	Timestamp int64   // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	if version == 0 {
+		b.Offsets, err = pd.getInt64Array()
+
+		return err
+	}
+
+	b.Timestamp, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// For backwards compatibility put the offset in the offsets array too
+	b.Offsets = []int64{b.Offset}
+
+	return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+
+	if version == 0 {
+		return pe.putInt64Array(b.Offsets)
+	}
+
+	pe.putInt64(b.Timestamp)
+	pe.putInt64(b.Offset)
+
+	return nil
+}
+
+type OffsetResponse struct {
+	Version int16
+	Blocks  map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(OffsetResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
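+// Example (editor's illustration): reading the newest offset for a partition
+// out of a response, assuming an OffsetRequest for "my_topic"/0 was sent:
+//
+//	block := resp.GetBlock("my_topic", 0)
+//	if block != nil && block.Err == ErrNoError {
+//		offset := block.Offset // v1 field; v0 populates Offsets instead
+//	}
+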
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		if err = pe.putString(topic); err != nil {
+			return err
+		}
+		if err = pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.version()); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+	return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_10_1_0
+	default:
+		return MinVersion
+	}
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*OffsetResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 0000000..74805cc
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,60 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+	// Primitives
+	getInt8() (int8, error)
+	getInt16() (int16, error)
+	getInt32() (int32, error)
+	getInt64() (int64, error)
+	getVarint() (int64, error)
+	getArrayLength() (int, error)
+	getBool() (bool, error)
+
+	// Collections
+	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
+	getString() (string, error)
+	getNullableString() (*string, error)
+	getInt32Array() ([]int32, error)
+	getInt64Array() ([]int64, error)
+	getStringArray() ([]string, error)
+
+	// Subsets
+	remaining() int
+	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+
+	// Stacks, see PushDecoder
+	push(in pushDecoder) error
+	pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and check the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+	check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of
+// the field itself is unknown until its value has been decoded (for instance, varint-encoded
+// length fields).
+// During push, the dynamicPushDecoder.decode() method is called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
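+
+// Usage sketch (editor's note): decoders bracket a checksummed or
+// length-prefixed region with push/pop; crc32Field is the pushDecoder used
+// this way in record_batch.go later in this diff:
+//
+//	if err := pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+//		return err
+//	}
+//	// ... decode the fields covered by the CRC ...
+//	if err := pd.pop(); err != nil { // pop verifies the checksum
+//		return err
+//	}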
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 0000000..67b8dae
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,65 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putArrayLength(in int) error
+	putBool(in bool)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of
+// the field itself is unknown until its value has been computed (for instance, varint-encoded
+// length fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+	adjustLength(currOffset int) int
+}
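+
+// Usage sketch (editor's note): encoders mirror the decoder stack. A
+// length-prefixed region is bracketed with push/pop, where pop back-fills
+// the reserved bytes; lengthField is used this way in produce_request.go:
+//
+//	pe.push(&lengthField{})
+//	if err := records.encode(pe); err != nil {
+//		return err
+//	}
+//	if err := pe.pop(); err != nil { // pop writes the final length
+//		return err
+//	}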
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 0000000..6a708e7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,217 @@
+package sarama
+
+import (
+	"hash"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not always. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+	Partitioner
+
+	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+	// but takes in the message being partitioned so that the partitioner can
+	// make a per-message determination.
+	MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.referenceAbs = true
+	}
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hasher = hasher()
+	}
+}
+
+// WithCustomFallbackPartitioner lets you specify which Partitioner should be used
+// when a message has no distribution key
+func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.random = randomHP
+	}
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+	return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+type randomPartitioner struct {
+	generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+	p := new(randomPartitioner)
+	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type roundRobinPartitioner struct {
+	partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+	return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if p.partition >= numPartitions {
+		p.partition = 0
+	}
+	ret := p.partition
+	p.partition++
+	return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type hashPartitioner struct {
+	random       Partitioner
+	hasher       hash.Hash32
+	referenceAbs bool
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
+// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = hasher()
+		p.referenceAbs = false
+		return p
+	}
+}
+
+// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
+func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = fnv.New32a()
+		p.referenceAbs = false
+		for _, option := range options {
+			option(p)
+		}
+		return p
+	}
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = false
+	return p
+}
+
+// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
+// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
+// that but it had a mistake and now there are people depending on both behaviours. This will
+// all go away on the next major version bump.
+func NewReferenceHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = true
+	return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if message.Key == nil {
+		return p.random.Partition(message, numPartitions)
+	}
+	bytes, err := message.Key.Encode()
+	if err != nil {
+		return -1, err
+	}
+	p.hasher.Reset()
+	_, err = p.hasher.Write(bytes)
+	if err != nil {
+		return -1, err
+	}
+	var partition int32
+	// Turns out we were doing our absolute value in a subtly different way from the upstream
+	// implementation, but now we need to maintain backwards compat for people who started using
+	// the old version; if referenceAbs is set we are compatible with the reference java client
+	// but not past Sarama versions
+	if p.referenceAbs {
+		partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
+	} else {
+		partition = int32(p.hasher.Sum32()) % numPartitions
+		if partition < 0 {
+			partition = -partition
+		}
+	}
+	return partition, nil
+}
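+
+// Editor's note (worked example): the two branches can disagree when Sum32()
+// has its sign bit set. With numPartitions = 8 and Sum32() = 0xfffffffd
+// (int32 -3):
+//
+//	referenceAbs: (-3 & 0x7fffffff) % 8 = 2147483645 % 8 = 5
+//	legacy:       -(-3 % 8)             = 3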
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
+	return message.Key != nil
+}
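+
+// Example (editor's illustration): wiring a partitioner into a producer
+// configuration, assuming the sarama Config type defined elsewhere in this
+// package:
+//
+//	conf := NewConfig()
+//	conf.Producer.Partitioner = NewHashPartitioner
+//	// or, with options:
+//	conf.Producer.Partitioner = NewCustomPartitioner(WithAbsFirst())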
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 0000000..b633cd1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"math"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+	stack  []pushEncoder
+	length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+	pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+	if in > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+	}
+	pe.length += 4
+	return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+	pe.length++
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+	pe.length += 4
+	if in == nil {
+		return nil
+	}
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		pe.putVarint(-1)
+		return nil
+	}
+	pe.putVarint(int64(len(in)))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+	if len(in) > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.length += 2
+		return nil
+	}
+	return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+	pe.length += 2
+	if len(in) > math.MaxInt16 {
+		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 8 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) offset() int {
+	return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+	in.saveOffset(pe.length)
+	pe.length += in.reserveLength()
+	pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+	in := pe.stack[len(pe.stack)-1]
+	pe.stack = pe.stack[:len(pe.stack)-1]
+	if dpe, ok := in.(dynamicPushEncoder); ok {
+		pe.length += dpe.adjustLength(pe.length)
+	}
+
+	return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+	return nil
+}
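+
+// Editor's sketch (illustrative): this package encodes in two passes. A
+// prepEncoder walk computes the exact size and validates lengths, then a
+// realEncoder (later in this diff) writes into a buffer of that size;
+// obj stands for any type with an encode(packetEncoder) error method:
+//
+//	var prep prepEncoder
+//	if err := obj.encode(&prep); err != nil {
+//		return nil, err
+//	}
+//	buf := make([]byte, prep.length)
+//	if err := obj.encode(&realEncoder{raw: buf}); err != nil {
+//		return nil, err
+//	}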
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 0000000..0c755d0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,252 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
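+
+// Example (editor's illustration): requiring the full in-sync replica set to
+// acknowledge each produce request, via the sarama Config type:
+//
+//	conf := NewConfig()
+//	conf.Producer.RequiredAcks = WaitForAll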
+
+type ProduceRequest struct {
+	TransactionalID *string
+	RequiredAcks    RequiredAcks
+	Timeout         int32
+	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+	records         map[string]map[int32]Records
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	var topicRecordCount int64
+	for _, messageBlock := range msgSet.Messages {
+		// Is this a fake "message" wrapping real messages?
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Guard against dividing by zero when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+	totalRecordCount := int64(0)
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, records := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = records.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				if r.Version >= 3 {
+					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+				} else {
+					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	if version >= 3 {
+		id, err := pd.getNullableString()
+		if err != nil {
+			return err
+		}
+		r.TransactionalID = id
+	}
+	requiredAcks, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.RequiredAcks = RequiredAcks(requiredAcks)
+	if r.Timeout, err = pd.getInt32(); err != nil {
+		return err
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+
+	r.records = make(map[string]map[int32]Records)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.records[topic] = make(map[int32]Records)
+
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			size, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			recordsDecoder, err := pd.getSubset(int(size))
+			if err != nil {
+				return err
+			}
+			var records Records
+			if err := records.decode(recordsDecoder); err != nil {
+				return err
+			}
+			r.records[topic][partition] = records
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+	return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+	if r.records == nil {
+		r.records = make(map[string]map[int32]Records)
+	}
+
+	if r.records[topic] == nil {
+		r.records[topic] = make(map[int32]Records)
+	}
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+	r.ensureRecords(topic, partition)
+	set := r.records[topic][partition].MsgSet
+
+	if set == nil {
+		set = new(MessageSet)
+		r.records[topic][partition] = newLegacyRecords(set)
+	}
+
+	set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newDefaultRecords(batch)
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 0000000..4c5cd35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,189 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type ProduceResponseBlock struct {
+	Err    KError
+	Offset int64
+	// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+	Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	tmp, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Err = KError(tmp)
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if millis, err := pd.getInt64(); err != nil {
+			return err
+		} else if millis != -1 {
+			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+		}
+	}
+
+	return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(int16(b.Err))
+	pe.putInt64(b.Offset)
+
+	if version >= 2 {
+		timestamp := int64(-1)
+		if !b.Timestamp.Before(time.Unix(0, 0)) {
+			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+		} else if !b.Timestamp.IsZero() {
+			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+		}
+		pe.putInt64(timestamp)
+	}
+
+	return nil
+}
+
+type ProduceResponse struct {
+	Blocks       map[string]map[int32]*ProduceResponseBlock
+	Version      int16
+	ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(ProduceResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	if r.Version >= 1 {
+		millis, err := pd.getInt32()
+		if err != nil {
+			return err
+		}
+
+		r.ThrottleTime = time.Duration(millis) * time.Millisecond
+	}
+
+	return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for id, prb := range partitions {
+			pe.putInt32(id)
+			err = prb.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if r.Version >= 1 {
+		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+	}
+	return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+	return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	case 3:
+		return V0_11_0_0
+	default:
+		return MinVersion
+	}
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*ProduceResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	block := &ProduceResponseBlock{
+		Err: err,
+	}
+	if r.Version >= 2 {
+		block.Timestamp = time.Now()
+	}
+	byTopic[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 0000000..4b42d9c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"time"
+)
+
+type partitionSet struct {
+	msgs          []*ProducerMessage
+	recordsToSend Records
+	bufferBytes   int
+}
+
+type produceSet struct {
+	parent *asyncProducer
+	msgs   map[string]map[int32]*partitionSet
+
+	bufferBytes int
+	bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+	return &produceSet{
+		msgs:   make(map[string]map[int32]*partitionSet),
+		parent: parent,
+	}
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+	var err error
+	var key, val []byte
+
+	if msg.Key != nil {
+		if key, err = msg.Key.Encode(); err != nil {
+			return err
+		}
+	}
+
+	if msg.Value != nil {
+		if val, err = msg.Value.Encode(); err != nil {
+			return err
+		}
+	}
+
+	timestamp := msg.Timestamp
+	if msg.Timestamp.IsZero() {
+		timestamp = time.Now()
+	}
+
+	partitions := ps.msgs[msg.Topic]
+	if partitions == nil {
+		partitions = make(map[int32]*partitionSet)
+		ps.msgs[msg.Topic] = partitions
+	}
+
+	var size int
+
+	set := partitions[msg.Partition]
+	if set == nil {
+		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+			batch := &RecordBatch{
+				FirstTimestamp:   timestamp,
+				Version:          2,
+				Codec:            ps.parent.conf.Producer.Compression,
+				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+				ProducerID:       ps.parent.txnmgr.producerID,
+				ProducerEpoch:    ps.parent.txnmgr.producerEpoch,
+			}
+			if ps.parent.conf.Producer.Idempotent {
+				batch.FirstSequence = msg.sequenceNumber
+			}
+			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+			size = recordBatchOverhead
+		} else {
+			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+		}
+		partitions[msg.Partition] = set
+	}
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
+			return errors.New("assertion failed: message out of sequence added to a batch")
+		}
+		// We are being conservative here to avoid having to prep encode the record
+		size += maximumRecordOverhead
+		rec := &Record{
+			Key:            key,
+			Value:          val,
+			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
+		}
+		size += len(key) + len(val)
+		if len(msg.Headers) > 0 {
+			rec.Headers = make([]*RecordHeader, len(msg.Headers))
+			for i := range msg.Headers {
+				rec.Headers[i] = &msg.Headers[i]
+				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+			}
+		}
+		set.recordsToSend.RecordBatch.addRecord(rec)
+	} else {
+		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+			msgToSend.Timestamp = timestamp
+			msgToSend.Version = 1
+		}
+		set.recordsToSend.MsgSet.addMessage(msgToSend)
+		size = producerMessageOverhead + len(key) + len(val)
+	}
+
+	set.bufferBytes += size
+	ps.bufferBytes += size
+	ps.bufferCount++
+
+	return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+	req := &ProduceRequest{
+		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+		req.Version = 2
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 3
+	}
+
+	for topic, partitionSets := range ps.msgs {
+		for partition, set := range partitionSets {
+			if req.Version >= 3 {
+				// If the API version we're hitting is 3 or greater, we need to calculate
+				// offsets for each record in the batch relative to FirstOffset.
+				// Additionally, we must set LastOffsetDelta to the value of the last offset
+				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
+				// final record of any batch will have an offset of (# of records in batch) - 1.
+				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+				//  under the RecordBatch section for details.)
+				rb := set.recordsToSend.RecordBatch
+				if len(rb.Records) > 0 {
+					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+					for i, record := range rb.Records {
+						record.OffsetDelta = int64(i)
+					}
+				}
+				req.AddBatch(topic, partition, rb)
+				continue
+			}
+			if ps.parent.conf.Producer.Compression == CompressionNone {
+				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
+			} else {
+				// When compression is enabled, the entire set for each partition is compressed
+				// and sent as the payload of a single fake "message" with the appropriate codec
+				// set and no key. When the server sees a message with a compression codec, it
+				// decompresses the payload and treats the result as its message set.
+
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					// If our version is 0.10 or later, assign relative offsets
+					// to the inner messages. This lets the broker avoid
+					// recompressing the message set.
+					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+					// for details on relative offsets.)
+					for i, msg := range set.recordsToSend.MsgSet.Messages {
+						msg.Offset = int64(i)
+					}
+				}
+				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
+				if err != nil {
+					Logger.Println(err) // if this happens, it's basically our fault.
+					panic(err)
+				}
+				compMsg := &Message{
+					Codec:            ps.parent.conf.Producer.Compression,
+					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+					Key:              nil,
+					Value:            payload,
+					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
+				}
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					compMsg.Version = 1
+					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
+				}
+				req.AddMessage(topic, partition, compMsg)
+			}
+		}
+	}
+
+	return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
+	for topic, partitionSet := range ps.msgs {
+		for partition, set := range partitionSet {
+			cb(topic, partition, set)
+		}
+	}
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+	if ps.msgs[topic] == nil {
+		return nil
+	}
+	set := ps.msgs[topic][partition]
+	if set == nil {
+		return nil
+	}
+	ps.bufferBytes -= set.bufferBytes
+	ps.bufferCount -= len(set.msgs)
+	delete(ps.msgs[topic], partition)
+	return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+	version := 1
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	switch {
+	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
+		return true
+	// Would we overflow the size-limit of a message-batch for this partition?
+	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+		return true
+	// Would we overflow simply in number of messages?
+	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) readyToFlush() bool {
+	switch {
+	// If we don't have any messages, nothing else matters
+	case ps.empty():
+		return false
+	// If all three config values are 0, we always flush as-fast-as-possible
+	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+		return true
+	// If we've passed the message trigger-point
+	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+		return true
+	// If we've passed the byte trigger-point
+	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) empty() bool {
+	return ps.bufferCount == 0
+}
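+
+// Editor's note (illustrative): the trigger-points checked in wouldOverflow
+// and readyToFlush map onto these producer settings:
+//
+//	conf := NewConfig()
+//	conf.Producer.Flush.Messages = 1000              // message trigger-point
+//	conf.Producer.Flush.Bytes = 1 << 20              // byte trigger-point
+//	conf.Producer.Flush.Frequency = 500 * time.Millisecond
+//	conf.Producer.Flush.MaxMessages = 0              // 0 means no hard cap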
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 0000000..23045e7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,324 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+var errVarintOverflow = PacketDecodingError{"varint overflow"}
+var errInvalidBool = PacketDecodingError{"invalid bool"}
+
+type realDecoder struct {
+	raw   []byte
+	off   int
+	stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+	if rd.remaining() < 1 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int8(rd.raw[rd.off])
+	rd.off++
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+	if rd.remaining() < 2 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+	rd.off += 2
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+	tmp, n := binary.Varint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	if n < 0 {
+		rd.off -= n
+		return -1, errVarintOverflow
+	}
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+	rd.off += 4
+	if tmp > rd.remaining() {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	} else if tmp > 2*math.MaxUint16 {
+		return -1, errInvalidArrayLength
+	}
+	return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+	b, err := rd.getInt8()
+	if err != nil || b == 0 {
+		return false, err
+	}
+	if b != 1 {
+		return false, errInvalidBool
+	}
+	return true, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+	tmp, err := rd.getInt32()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+	tmp, err := rd.getVarint()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+	length, err := rd.getInt16()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return "", err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if rd.remaining() < 4*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]int32, n)
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if rd.remaining() < 8*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]int64, n)
+	for i := range ret {
+		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+		rd.off += 8
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	if n < 0 {
+		return nil, errInvalidArrayLength
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+	return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+	buf, err := rd.getRawBytes(length)
+	if err != nil {
+		return nil, err
+	}
+	return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+	if length < 0 {
+		return nil, errInvalidByteSliceLength
+	} else if length > rd.remaining() {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	start := rd.off
+	rd.off += length
+	return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+	if rd.remaining() < offset+length {
+		return nil, ErrInsufficientData
+	}
+	off := rd.off + offset
+	return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+	in.saveOffset(rd.off)
+
+	var reserve int
+	if dpd, ok := in.(dynamicPushDecoder); ok {
+		if err := dpd.decode(rd); err != nil {
+			return err
+		}
+	} else {
+		reserve = in.reserveLength()
+		if rd.remaining() < reserve {
+			rd.off = len(rd.raw)
+			return ErrInsufficientData
+		}
+	}
+
+	rd.stack = append(rd.stack, in)
+
+	rd.off += reserve
+
+	return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := rd.stack[len(rd.stack)-1]
+	rd.stack = rd.stack[:len(rd.stack)-1]
+
+	return in.check(rd.off, rd.raw)
+}
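+
+// Editor's sketch (illustrative): decoding a raw Kafka payload with a
+// realDecoder, mirroring this package's internal decode() helper:
+//
+//	rd := realDecoder{raw: payload}
+//	var resp OffsetResponse
+//	if err := resp.decode(&rd, 1); err != nil {
+//		return err
+//	}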
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 0000000..3c75387
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,156 @@
+package sarama
+
+import (
+	"encoding/binary"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+	raw      []byte
+	off      int
+	stack    []pushEncoder
+	registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+	re.raw[re.off] = byte(in)
+	re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+	re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+	re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+	re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+	re.putInt32(int32(in))
+	return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+	if in {
+		re.putInt8(1)
+		return
+	}
+	re.putInt8(0)
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	re.putInt32(int32(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		re.putVarint(-1)
+		return nil
+	}
+	re.putVarint(int64(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+	re.putInt16(int16(len(in)))
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt16(-1)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt64(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) offset() int {
+	return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+	in.saveOffset(re.off)
+	re.off += in.reserveLength()
+	re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := re.stack[len(re.stack)-1]
+	re.stack = re.stack[:len(re.stack)-1]
+
+	return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+	return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
new file mode 100644
index 0000000..cded308
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -0,0 +1,113 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"time"
+)
+
+const (
+	controlMask           = 0x20
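+	// (editor's note) maximumRecordOverhead is the worst case for a record's
+	// bookkeeping fields: five 32-bit varints (length, offset delta, key
+	// length, value length, header count), one 64-bit varint (timestamp
+	// delta) and the attributes byte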
+	maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
+)
+
+type RecordHeader struct {
+	Key   []byte
+	Value []byte
+}
+
+func (h *RecordHeader) encode(pe packetEncoder) error {
+	if err := pe.putVarintBytes(h.Key); err != nil {
+		return err
+	}
+	return pe.putVarintBytes(h.Value)
+}
+
+func (h *RecordHeader) decode(pd packetDecoder) (err error) {
+	if h.Key, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	if h.Value, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type Record struct {
+	Attributes     int8
+	TimestampDelta time.Duration
+	OffsetDelta    int64
+	Key            []byte
+	Value          []byte
+	Headers        []*RecordHeader
+
+	length varintLengthField
+}
+
+func (r *Record) encode(pe packetEncoder) error {
+	pe.push(&r.length)
+	pe.putInt8(r.Attributes)
+	pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
+	pe.putVarint(r.OffsetDelta)
+	if err := pe.putVarintBytes(r.Key); err != nil {
+		return err
+	}
+	if err := pe.putVarintBytes(r.Value); err != nil {
+		return err
+	}
+	pe.putVarint(int64(len(r.Headers)))
+
+	for _, h := range r.Headers {
+		if err := h.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return pe.pop()
+}
+
+func (r *Record) decode(pd packetDecoder) (err error) {
+	if err = pd.push(&r.length); err != nil {
+		return err
+	}
+
+	if r.Attributes, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	timestamp, err := pd.getVarint()
+	if err != nil {
+		return err
+	}
+	r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
+
+	if r.OffsetDelta, err = pd.getVarint(); err != nil {
+		return err
+	}
+
+	if r.Key, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	if r.Value, err = pd.getVarintBytes(); err != nil {
+		return err
+	}
+
+	numHeaders, err := pd.getVarint()
+	if err != nil {
+		return err
+	}
+
+	if numHeaders >= 0 {
+		r.Headers = make([]*RecordHeader, numHeaders)
+	}
+	for i := int64(0); i < numHeaders; i++ {
+		hdr := new(RecordHeader)
+		if err := hdr.decode(pd); err != nil {
+			return err
+		}
+		r.Headers[i] = hdr
+	}
+
+	return pd.pop()
+}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
new file mode 100644
index 0000000..e0f183f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -0,0 +1,208 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+	for _, r := range e {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+	for i := range e {
+		rec := &Record{}
+		if err := rec.decode(pd); err != nil {
+			return err
+		}
+		e[i] = rec
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	CompressionLevel      int
+	Control               bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if numRecs >= 0 {
+		b.Records = make([]*Record, numRecs)
+	}
+
+	bufSize := int(batchLen) - recordBatchOverhead
+	recBuffer, err := pd.getRawBytes(bufSize)
+	if err != nil {
+		if err == ErrInsufficientData {
+			b.PartialTrailingRecord = true
+			b.Records = nil
+			return nil
+		}
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	recBuffer, err = decompress(b.Codec, recBuffer)
+	if err != nil {
+		return err
+	}
+
+	b.recordsLen = len(recBuffer)
+	err = decode(recBuffer, recordsArray(b.Records))
+	if err == ErrInsufficientData {
+		b.PartialTrailingRecord = true
+		b.Records = nil
+		return nil
+	}
+	return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+	var raw []byte
+	var err error
+	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+		return err
+	}
+	b.recordsLen = len(raw)
+
+	b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+	return err
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+	attr := int16(b.Codec) & int16(compressionCodecMask)
+	if b.Control {
+		attr |= controlMask
+	}
+	return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+	b.Records = append(b.Records, r)
+}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
new file mode 100644
index 0000000..192f592
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -0,0 +1,194 @@
+package sarama
+
+import "fmt"
+
+const (
+	unknownRecords = iota
+	legacyRecords
+	defaultRecords
+
+	magicOffset = 16
+	magicLength = 1
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+	recordsType int
+	MsgSet      *MessageSet
+	RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+	return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+	return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets the type of Records depending on which of MsgSet or RecordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+	if r.MsgSet == nil && r.RecordBatch == nil {
+		return true, nil
+	}
+	if r.MsgSet != nil && r.RecordBatch != nil {
+		return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
+	}
+	r.recordsType = defaultRecords
+	if r.MsgSet != nil {
+		r.recordsType = legacyRecords
+	}
+	return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return nil
+		}
+		return r.MsgSet.encode(pe)
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return nil
+		}
+		return r.RecordBatch.encode(pe)
+	}
+
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+	magic, err := magicValue(pd)
+	if err != nil {
+		return err
+	}
+
+	r.recordsType = defaultRecords
+	if magic < 2 {
+		r.recordsType = legacyRecords
+	}
+
+	return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+	if r.recordsType == unknownRecords {
+		if err := r.setTypeFromMagic(pd); err != nil {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		r.MsgSet = &MessageSet{}
+		return r.MsgSet.decode(pd)
+	case defaultRecords:
+		r.RecordBatch = &RecordBatch{}
+		return r.RecordBatch.decode(pd)
+	}
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return 0, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return 0, nil
+		}
+		return len(r.MsgSet.Messages), nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return 0, nil
+		}
+		return len(r.RecordBatch.Records), nil
+	}
+	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.PartialTrailingMessage, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.PartialTrailingRecord, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		return false, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.Control, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isOverflow() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.OverflowMessage, nil
+	case defaultRecords:
+		return false, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+	dec, err := pd.peek(magicOffset, magicLength)
+	if err != nil {
+		return 0, err
+	}
+
+	return dec.getInt8()
+}
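
Both the legacy and v2 wire layouts happen to place the magic byte at offset 16 (for v2: 8-byte base offset, 4-byte batch length, 4-byte partition leader epoch; for legacy: 8-byte offset, 4-byte message size, 4-byte CRC), which is why a single peek is enough to pick the decoder. A self-contained sketch of the same check, independent of the package's unexported helpers:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// recordsVersion reports whether a raw buffer holds a legacy MessageSet
// (magic < 2) or a v2 RecordBatch, by inspecting the magic byte at offset 16.
func recordsVersion(raw []byte) (string, error) {
	const magicOffset = 16
	if len(raw) <= magicOffset {
		return "", fmt.Errorf("buffer too short: %d bytes", len(raw))
	}
	if int8(raw[magicOffset]) < 2 {
		return "legacy MessageSet", nil
	}
	return "default RecordBatch", nil
}

func main() {
	// Build just the first 17 bytes of a v2 batch header.
	raw := make([]byte, 17)
	binary.BigEndian.PutUint64(raw[0:8], 0)   // base offset
	binary.BigEndian.PutUint32(raw[8:12], 49) // batch length
	binary.BigEndian.PutUint32(raw[12:16], 0) // partition leader epoch
	raw[16] = 2                               // magic

	kind, _ := recordsVersion(raw)
	fmt.Println(kind) // default RecordBatch
}
```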
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 0000000..4d211a1
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,149 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+type protocolBody interface {
+	encoder
+	versionedDecoder
+	key() int16
+	version() int16
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+	err = pe.putString(r.clientID)
+	if err != nil {
+		return err
+	}
+	err = r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	var key int16
+	if key, err = pd.getInt16(); err != nil {
+		return err
+	}
+	var version int16
+	if version, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if r.correlationID, err = pd.getInt32(); err != nil {
+		return err
+	}
+	r.clientID, err = pd.getString()
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+	return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
+	lengthBytes := make([]byte, 4)
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(lengthBytes)
+
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(encodedReq)
+
+	req = &request{}
+	if err := decode(encodedReq, req); err != nil {
+		return nil, bytesRead, err
+	}
+	return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+	switch key {
+	case 0:
+		return &ProduceRequest{}
+	case 1:
+		return &FetchRequest{}
+	case 2:
+		return &OffsetRequest{Version: version}
+	case 3:
+		return &MetadataRequest{}
+	case 8:
+		return &OffsetCommitRequest{Version: version}
+	case 9:
+		return &OffsetFetchRequest{}
+	case 10:
+		return &FindCoordinatorRequest{}
+	case 11:
+		return &JoinGroupRequest{}
+	case 12:
+		return &HeartbeatRequest{}
+	case 13:
+		return &LeaveGroupRequest{}
+	case 14:
+		return &SyncGroupRequest{}
+	case 15:
+		return &DescribeGroupsRequest{}
+	case 16:
+		return &ListGroupsRequest{}
+	case 17:
+		return &SaslHandshakeRequest{}
+	case 18:
+		return &ApiVersionsRequest{}
+	case 19:
+		return &CreateTopicsRequest{}
+	case 20:
+		return &DeleteTopicsRequest{}
+	case 21:
+		return &DeleteRecordsRequest{}
+	case 22:
+		return &InitProducerIDRequest{}
+	case 24:
+		return &AddPartitionsToTxnRequest{}
+	case 25:
+		return &AddOffsetsToTxnRequest{}
+	case 26:
+		return &EndTxnRequest{}
+	case 28:
+		return &TxnOffsetCommitRequest{}
+	case 29:
+		return &DescribeAclsRequest{}
+	case 30:
+		return &CreateAclsRequest{}
+	case 31:
+		return &DeleteAclsRequest{}
+	case 32:
+		return &DescribeConfigsRequest{}
+	case 33:
+		return &AlterConfigsRequest{}
+	case 37:
+		return &CreatePartitionsRequest{}
+	case 42:
+		return &DeleteGroupsRequest{}
+	}
+	return nil
+}
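
decodeRequest above illustrates Kafka's request framing: a 4-byte big-endian length prefix followed by that many payload bytes, with sanity bounds enforced before any allocation. A standalone sketch of just the framing logic, applying the same bounds check (the payload here is an arbitrary placeholder):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readFrame reads one length-prefixed frame: a 4-byte big-endian size
// followed by the payload, rejecting implausible sizes up front.
func readFrame(r io.Reader, maxSize int32) ([]byte, error) {
	var length int32
	if err := binary.Read(r, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	if length <= 4 || length > maxSize {
		return nil, fmt.Errorf("frame of length %d too large or too small", length)
	}
	payload := make([]byte, length)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	var buf bytes.Buffer
	body := []byte("hello-world")
	binary.Write(&buf, binary.BigEndian, int32(len(body))) // length prefix
	buf.Write(body)

	payload, err := readFrame(&buf, 100*1024*1024)
	fmt.Printf("payload=%q err=%v\n", payload, err)
}
```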
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000..f3f4d27
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+	r.correlationID, err = pd.getInt32()
+	return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 0000000..7d5dc60
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,99 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
+consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
+https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
+and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| Name                                         | Type       | Description                                                   |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
+	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
+	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
+	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
+	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
+	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
+	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
+	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
+	| request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
+	| request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
+	| response-rate                                | meter      | Responses/second received from all brokers                    |
+	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
+	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
+	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
+	+----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
+	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
+	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
+	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
+	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+	"io/ioutil"
+	"log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
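
A brief usage note: because Logger defaults to ioutil.Discard, connection-management events are invisible until the caller installs a real logger. A minimal sketch (writing to stderr is an arbitrary choice, and raising MaxResponseSize is shown only to illustrate that the limits are plain package variables):

```go
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Route Sarama's connection-management logs to stderr instead of the
	// default ioutil.Discard. Do this before creating any clients.
	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)

	// The size limits are package variables and may be adjusted, but keep
	// the protection in mind: brokers place no natural limit on responses.
	sarama.MaxResponseSize = 200 * 1024 * 1024
}
```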
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..fbbc894
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..ef290d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000..fe20708
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+	GroupId          string
+	GenerationId     int32
+	MemberId         string
+	GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+		return err
+	}
+	for memberId, memberAssignment := range r.GroupAssignments {
+		if err := pe.putString(memberId); err != nil {
+			return err
+		}
+		if err := pe.putBytes(memberAssignment); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupAssignments = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		memberAssignment, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.GroupAssignments[memberId] = memberAssignment
+	}
+
+	return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+	if r.GroupAssignments == nil {
+		r.GroupAssignments = make(map[string][]byte)
+	}
+
+	r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+	bin, err := encode(memberAssignment, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupAssignment(memberId, bin)
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 0000000..194b382
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+	Err              KError
+	MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(r.MemberAssignment, assignment)
+	return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	r.MemberAssignment, err = pd.getBytes()
+	return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+	return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+	return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+	return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 0000000..021c5a0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,149 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+	if err := verifyProducerConfig(client.Config()); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+	sp := &syncProducer{producer: p}
+
+	sp.wg.Add(2)
+	go withRecover(sp.handleSuccesses)
+	go withRecover(sp.handleErrors)
+
+	return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+	if !config.Producer.Return.Errors {
+		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+	}
+	if !config.Producer.Return.Successes {
+		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+	}
+	return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+	expectation := make(chan *ProducerError, 1)
+	msg.expectation = expectation
+	sp.producer.Input() <- msg
+
+	if err := <-expectation; err != nil {
+		return -1, -1, err.Err
+	}
+
+	return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+	expectations := make(chan chan *ProducerError, len(msgs))
+	go func() {
+		for _, msg := range msgs {
+			expectation := make(chan *ProducerError, 1)
+			msg.expectation = expectation
+			sp.producer.Input() <- msg
+			expectations <- expectation
+		}
+		close(expectations)
+	}()
+
+	var errors ProducerErrors
+	for expectation := range expectations {
+		if err := <-expectation; err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+	defer sp.wg.Done()
+	for msg := range sp.producer.Successes() {
+		expectation := msg.expectation
+		expectation <- nil
+	}
+}
+
+func (sp *syncProducer) handleErrors() {
+	defer sp.wg.Done()
+	for err := range sp.producer.Errors() {
+		expectation := err.Msg.expectation
+		expectation <- err
+	}
+}
+
+func (sp *syncProducer) Close() error {
+	sp.producer.AsyncClose()
+	sp.wg.Wait()
+	return nil
+}
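
A short usage sketch for the interface above; the broker address and topic name are placeholders, not defaults. Note that NewConfig enables Producer.Return.Errors by default, so only Successes needs to be set explicitly:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Both Return flags must be true for a SyncProducer.
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close() // required to avoid leaks

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}
```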
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 0000000..372278d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type Timestamp struct {
+	*time.Time
+}
+
+func (t Timestamp) encode(pe packetEncoder) error {
+	timestamp := int64(-1)
+
+	if !t.Before(time.Unix(0, 0)) {
+		timestamp = t.UnixNano() / int64(time.Millisecond)
+	} else if !t.IsZero() {
+		return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+	}
+
+	pe.putInt64(timestamp)
+	return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+	millis, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	// negative timestamps are invalid; in these cases we return
+	// a zero time
+	timestamp := time.Time{}
+	if millis >= 0 {
+		timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+	}
+
+	*t.Time = timestamp
+	return nil
+}
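
For clarity, here is a standalone round trip of the millisecond encoding used above, with an arbitrary sample date; Kafka timestamps are int64 milliseconds since the Unix epoch, with -1 meaning "no timestamp":

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 1, 2, 3, 4, 5, 678000000, time.UTC)

	// encode: milliseconds since the epoch
	millis := t.UnixNano() / int64(time.Millisecond)

	// decode: reconstruct seconds plus the millisecond remainder
	decoded := time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)).UTC()

	fmt.Println(decoded.Equal(t)) // true: millisecond precision survives
}
```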
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
new file mode 100644
index 0000000..71e95b8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
@@ -0,0 +1,126 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+	TransactionalID string
+	GroupID         string
+	ProducerID      int64
+	ProducerEpoch   int16
+	Topics          map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(t.TransactionalID); err != nil {
+		return err
+	}
+	if err := pe.putString(t.GroupID); err != nil {
+		return err
+	}
+	pe.putInt64(t.ProducerID)
+	pe.putInt16(t.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+	for topic, partitions := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for _, partition := range partitions {
+			if err := partition.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	if t.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionOffsetMetadata)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+		for j := 0; j < m; j++ {
+			partitionOffsetMetadata := new(PartitionOffsetMetadata)
+			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+				return err
+			}
+			t.Topics[topic][j] = partitionOffsetMetadata
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+	return 28
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
+
+type PartitionOffsetMetadata struct {
+	Partition int32
+	Offset    int64
+	Metadata  *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putInt64(p.Offset)
+	if err := pe.putNullableString(p.Metadata); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if p.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if p.Metadata, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
new file mode 100644
index 0000000..6c980f4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
@@ -0,0 +1,83 @@
+package sarama
+
+import (
+	"time"
+)
+
+type TxnOffsetCommitResponse struct {
+	ThrottleTime time.Duration
+	Topics       map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+	pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+
+	for topic, e := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			t.Topics[topic][j] = new(PartitionError)
+			if err := t.Topics[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+	return 28
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+	return V0_11_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 0000000..7dcbf03
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,220 @@
+package sarama
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"regexp"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+	return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+	return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+	ret := make([]int32, 0, len(input))
+	for _, val := range input {
+		ret = append(ret, val)
+	}
+	return ret
+}
+
+func withRecover(fn func()) {
+	defer func() {
+		handler := PanicHandler
+		if handler != nil {
+			if err := recover(); err != nil {
+				handler(err)
+			}
+		}
+	}()
+
+	fn()
+}
+
+func safeAsyncClose(b *Broker) {
+	tmp := b // local var prevents clobbering in goroutine
+	go withRecover(func() {
+		if connected, _ := tmp.Connected(); connected {
+			if err := tmp.Close(); err != nil {
+				Logger.Println("Error closing broker", tmp.ID(), ":", err)
+			}
+		}
+	})
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in Kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//    V1.IsAtLeast(V2) // false
+//    V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Constants defining the supported Kafka versions.
+var (
+	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
+	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
+	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
+	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
+	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
+	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
+
+	SupportedVersions = []KafkaVersion{
+		V0_8_2_0,
+		V0_8_2_1,
+		V0_8_2_2,
+		V0_9_0_0,
+		V0_9_0_1,
+		V0_10_0_0,
+		V0_10_0_1,
+		V0_10_1_0,
+		V0_10_1_1,
+		V0_10_2_0,
+		V0_10_2_1,
+		V0_11_0_0,
+		V0_11_0_1,
+		V0_11_0_2,
+		V1_0_0_0,
+		V1_1_0_0,
+		V1_1_1_0,
+		V2_0_0_0,
+		V2_0_1_0,
+		V2_1_0_0,
+	}
+	MinVersion = V0_8_2_0
+	MaxVersion = V2_1_0_0
+)
+
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	if len(s) < 5 {
+		return MinVersion, fmt.Errorf("invalid version `%s`", s)
+	}
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return MinVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+	if !regexp.MustCompile(pattern).MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	}
+
+	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+}
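
A usage sketch for ParseKafkaVersion and IsAtLeast; the version string is arbitrary, and wiring the result into Config.Version assumes the Config type from this same package:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Pre-1.0 versions need all four digits ("0.11.0.2"); later versions
	// use three ("1.1.0"), matching the two scan patterns above.
	v, err := sarama.ParseKafkaVersion("1.1.0")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(v)                             // 1.1.0
	fmt.Println(v.IsAtLeast(sarama.V0_11_0_0)) // true

	cfg := sarama.NewConfig()
	cfg.Version = v // opt in to protocol features gated on this version
	_ = cfg
}
```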
diff --git a/vendor/github.com/Shopify/sarama/zstd_cgo.go b/vendor/github.com/Shopify/sarama/zstd_cgo.go
new file mode 100644
index 0000000..f5ccb31
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/zstd_cgo.go
@@ -0,0 +1,13 @@
+// +build cgo
+
+package sarama
+
+import "github.com/DataDog/zstd"
+
+func zstdDecompress(dst, src []byte) ([]byte, error) {
+	return zstd.Decompress(dst, src)
+}
+
+func zstdCompressLevel(dst, src []byte, level int) ([]byte, error) {
+	return zstd.CompressLevel(dst, src, level)
+}
diff --git a/vendor/github.com/Shopify/sarama/zstd_fallback.go b/vendor/github.com/Shopify/sarama/zstd_fallback.go
new file mode 100644
index 0000000..381a56b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/zstd_fallback.go
@@ -0,0 +1,17 @@
+// +build !cgo
+
+package sarama
+
+import (
+	"errors"
+)
+
+var errZstdCgo = errors.New("zstd compression requires building with cgo enabled")
+
+func zstdDecompress(dst, src []byte) ([]byte, error) {
+	return nil, errZstdCgo
+}
+
+func zstdCompressLevel(dst, src []byte, level int) ([]byte, error) {
+	return nil, errZstdCgo
+}
diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore
new file mode 100755
index 0000000..8c03ec1
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/metrics.out
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE
new file mode 100644
index 0000000..106569e
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md
new file mode 100644
index 0000000..7b6f23e
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/README.md
@@ -0,0 +1,71 @@
+go-metrics
+==========
+
+This library provides a `metrics` package which can be used to instrument code,
+expose application metrics, and profile runtime performance in a flexible manner.
+
+Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
+
+Sinks
+=====
+
+The `metrics` package makes use of a `MetricSink` interface to support delivery
+to any type of backend. Currently the following sinks are provided:
+
+* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
+* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
+* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
+* InmemSink : Provides in-memory aggregation, can be used to export stats
+* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
+* BlackholeSink : Sinks to nowhere
+
+In addition to the sinks, the `InmemSignal` can be used to catch a signal,
+and dump a formatted output of recent metrics. For example, when a process gets
+a SIGUSR1, it can dump recent performance metrics to stderr for debugging.
+
+Examples
+========
+
+Here is an example of using the package:
+
+    func SlowMethod() {
+        // Profiling the runtime of a method
+        defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
+    }
+
+    // Configure a statsite sink as the global metrics sink
+    sink, _ := metrics.NewStatsiteSink("statsite:8125")
+    metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+
+    // Emit a Key/Value pair
+    metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
+
+
+Here is an example of setting up a signal handler:
+
+    // Setup the inmem sink and signal handler
+    inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+    sig := metrics.DefaultInmemSignal(inm)
+    metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
+
+    // Run some code
+    inm.SetGauge([]string{"foo"}, 42)
+    inm.EmitKey([]string{"bar"}, 30)
+
+    inm.IncrCounter([]string{"baz"}, 42)
+    inm.IncrCounter([]string{"baz"}, 1)
+    inm.IncrCounter([]string{"baz"}, 80)
+
+    inm.AddSample([]string{"method", "wow"}, 42)
+    inm.AddSample([]string{"method", "wow"}, 100)
+    inm.AddSample([]string{"method", "wow"}, 22)
+
+    ....
+
+When a signal comes in, output like the following will be dumped to stderr:
+
+    [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
+    [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
+    [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
+    [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
+
diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go
new file mode 100644
index 0000000..31098dd
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/const_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package metrics
+
+import (
+	"syscall"
+)
+
+const (
+	// DefaultSignal is used with DefaultInmemSignal
+	DefaultSignal = syscall.SIGUSR1
+)
diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go
new file mode 100644
index 0000000..38136af
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/const_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package metrics
+
+import (
+	"syscall"
+)
+
+const (
+	// DefaultSignal is used with DefaultInmemSignal
+	// Windows has no SIGUSR1, use SIGBREAK
+	DefaultSignal = syscall.Signal(21)
+)
diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go
new file mode 100644
index 0000000..83fb6bb
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/inmem.go
@@ -0,0 +1,247 @@
+package metrics
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"sync"
+	"time"
+)
+
+// InmemSink provides a MetricSink that does in-memory aggregation
+// without sending metrics over a network. It can be embedded within
+// an application to provide profiling information.
+type InmemSink struct {
+	// How long is each aggregation interval
+	interval time.Duration
+
+	// retain controls how many metric intervals we keep
+	retain time.Duration
+
+	// maxIntervals is the maximum length of intervals.
+	// It is retain / interval.
+	maxIntervals int
+
+	// intervals is a slice of the retained intervals
+	intervals    []*IntervalMetrics
+	intervalLock sync.RWMutex
+
+	rateDenom float64
+}
+
+// IntervalMetrics stores the aggregated metrics
+// for a specific interval
+type IntervalMetrics struct {
+	sync.RWMutex
+
+	// The start time of the interval
+	Interval time.Time
+
+	// Gauges maps the key to the last set value
+	Gauges map[string]float32
+
+	// Points maps the string to the list of emitted values
+	// from EmitKey
+	Points map[string][]float32
+
+	// Counters maps the string key to a sum of the counter
+	// values
+	Counters map[string]*AggregateSample
+
+	// Samples maps the key to an AggregateSample,
+	// which has the rolled up view of a sample
+	Samples map[string]*AggregateSample
+}
+
+// NewIntervalMetrics creates a new IntervalMetrics for a given interval
+func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
+	return &IntervalMetrics{
+		Interval: intv,
+		Gauges:   make(map[string]float32),
+		Points:   make(map[string][]float32),
+		Counters: make(map[string]*AggregateSample),
+		Samples:  make(map[string]*AggregateSample),
+	}
+}
+
+// AggregateSample is used to hold aggregate metrics
+// about a sample
+type AggregateSample struct {
+	Count       int       // The count of emitted pairs
+	Rate        float64   // The count of emitted pairs per time unit (usually 1 second)
+	Sum         float64   // The sum of values
+	SumSq       float64   // The sum of squared values
+	Min         float64   // Minimum value
+	Max         float64   // Maximum value
+	LastUpdated time.Time // When value was last updated
+}
+
+// Computes a Stddev of the values
+func (a *AggregateSample) Stddev() float64 {
+	num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
+	div := float64(a.Count * (a.Count - 1))
+	if div == 0 {
+		return 0
+	}
+	return math.Sqrt(num / div)
+}
+
+// Computes a mean of the values
+func (a *AggregateSample) Mean() float64 {
+	if a.Count == 0 {
+		return 0
+	}
+	return a.Sum / float64(a.Count)
+}
+
+// Ingest is used to update a sample
+func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
+	a.Count++
+	a.Sum += v
+	a.SumSq += (v * v)
+	if v < a.Min || a.Count == 1 {
+		a.Min = v
+	}
+	if v > a.Max || a.Count == 1 {
+		a.Max = v
+	}
+	a.Rate = float64(a.Count)/rateDenom
+	a.LastUpdated = time.Now()
+}
+
+func (a *AggregateSample) String() string {
+	if a.Count == 0 {
+		return "Count: 0"
+	} else if a.Stddev() == 0 {
+		return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
+	} else {
+		return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
+			a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
+	}
+}
+
+// NewInmemSink is used to construct a new in-memory sink.
+// It uses an aggregation interval and a maximum retention period.
+func NewInmemSink(interval, retain time.Duration) *InmemSink {
+	rateTimeUnit := time.Second
+	i := &InmemSink{
+		interval:     interval,
+		retain:       retain,
+		maxIntervals: int(retain / interval),
+		rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
+	}
+	i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
+	return i
+}
+
+func (i *InmemSink) SetGauge(key []string, val float32) {
+	k := i.flattenKey(key)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+	intv.Gauges[k] = val
+}
+
+func (i *InmemSink) EmitKey(key []string, val float32) {
+	k := i.flattenKey(key)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+	vals := intv.Points[k]
+	intv.Points[k] = append(vals, val)
+}
+
+func (i *InmemSink) IncrCounter(key []string, val float32) {
+	k := i.flattenKey(key)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+
+	agg := intv.Counters[k]
+	if agg == nil {
+		agg = &AggregateSample{}
+		intv.Counters[k] = agg
+	}
+	agg.Ingest(float64(val), i.rateDenom)
+}
+
+func (i *InmemSink) AddSample(key []string, val float32) {
+	k := i.flattenKey(key)
+	intv := i.getInterval()
+
+	intv.Lock()
+	defer intv.Unlock()
+
+	agg := intv.Samples[k]
+	if agg == nil {
+		agg = &AggregateSample{}
+		intv.Samples[k] = agg
+	}
+	agg.Ingest(float64(val), i.rateDenom)
+}
+
+// Data is used to retrieve all the aggregated metrics.
+// The returned intervals may still be in use; acquire each interval's
+// read lock before reading its maps.
+func (i *InmemSink) Data() []*IntervalMetrics {
+	// Get the current interval, forces creation
+	i.getInterval()
+
+	i.intervalLock.RLock()
+	defer i.intervalLock.RUnlock()
+
+	intervals := make([]*IntervalMetrics, len(i.intervals))
+	copy(intervals, i.intervals)
+	return intervals
+}
+
+func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {
+	i.intervalLock.RLock()
+	defer i.intervalLock.RUnlock()
+
+	n := len(i.intervals)
+	if n > 0 && i.intervals[n-1].Interval == intv {
+		return i.intervals[n-1]
+	}
+	return nil
+}
+
+func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {
+	i.intervalLock.Lock()
+	defer i.intervalLock.Unlock()
+
+	// Check for an existing interval
+	n := len(i.intervals)
+	if n > 0 && i.intervals[n-1].Interval == intv {
+		return i.intervals[n-1]
+	}
+
+	// Add the current interval
+	current := NewIntervalMetrics(intv)
+	i.intervals = append(i.intervals, current)
+	n++
+
+	// Truncate the intervals if they are too long
+	if n >= i.maxIntervals {
+		copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
+		i.intervals = i.intervals[:i.maxIntervals]
+	}
+	return current
+}
+
+// getInterval returns the current interval to write to
+func (i *InmemSink) getInterval() *IntervalMetrics {
+	intv := time.Now().Truncate(i.interval)
+	if m := i.getExistingInterval(intv); m != nil {
+		return m
+	}
+	return i.createInterval(intv)
+}
+
+// Flattens the key for formatting, removes spaces
+func (i *InmemSink) flattenKey(parts []string) string {
+	joined := strings.Join(parts, ".")
+	return strings.Replace(joined, " ", "_", -1)
+}
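
A usage sketch tying the sink together; the interval and retention values are arbitrary, and the locking follows the guidance on Data() above:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Aggregate into 1-second intervals, retaining one minute of them.
	sink := metrics.NewInmemSink(time.Second, time.Minute)

	sink.IncrCounter([]string{"requests"}, 1)
	sink.IncrCounter([]string{"requests"}, 2)
	sink.AddSample([]string{"latency ms"}, 42) // spaces are flattened to "_"

	for _, intv := range sink.Data() {
		intv.RLock()
		for name, agg := range intv.Counters {
			fmt.Printf("counter %s: %s\n", name, agg)
		}
		for name, agg := range intv.Samples {
			fmt.Printf("sample %s: %s\n", name, agg)
		}
		intv.RUnlock()
	}
}
```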
diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go
new file mode 100644
index 0000000..95d08ee
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/inmem_signal.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+)
+
+// InmemSignal is used to listen for a given signal, and when received,
+// to dump the current metrics from the InmemSink to an io.Writer
+type InmemSignal struct {
+	signal syscall.Signal
+	inm    *InmemSink
+	w      io.Writer
+	sigCh  chan os.Signal
+
+	stop     bool
+	stopCh   chan struct{}
+	stopLock sync.Mutex
+}
+
+// NewInmemSignal creates a new InmemSignal which listens for a given signal,
+// and dumps the current metrics out to a writer
+func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
+	i := &InmemSignal{
+		signal: sig,
+		inm:    inmem,
+		w:      w,
+		sigCh:  make(chan os.Signal, 1),
+		stopCh: make(chan struct{}),
+	}
+	signal.Notify(i.sigCh, sig)
+	go i.run()
+	return i
+}
+
+// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
+// and writes output to stderr. Windows uses SIGBREAK
+func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
+	return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
+}
+
+// Stop is used to stop the InmemSignal from listening
+func (i *InmemSignal) Stop() {
+	i.stopLock.Lock()
+	defer i.stopLock.Unlock()
+
+	if i.stop {
+		return
+	}
+	i.stop = true
+	close(i.stopCh)
+	signal.Stop(i.sigCh)
+}
+
+// run is a long running routine that handles signals
+func (i *InmemSignal) run() {
+	for {
+		select {
+		case <-i.sigCh:
+			i.dumpStats()
+		case <-i.stopCh:
+			return
+		}
+	}
+}
+
+// dumpStats is used to dump the data to the output writer
+func (i *InmemSignal) dumpStats() {
+	buf := bytes.NewBuffer(nil)
+
+	data := i.inm.Data()
+	// Skip the last period which is still being aggregated
+	for i := 0; i < len(data)-1; i++ {
+		intv := data[i]
+		intv.RLock()
+		for name, val := range intv.Gauges {
+			fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val)
+		}
+		for name, vals := range intv.Points {
+			for _, val := range vals {
+				fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
+			}
+		}
+		for name, agg := range intv.Counters {
+			fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg)
+		}
+		for name, agg := range intv.Samples {
+			fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg)
+		}
+		intv.RUnlock()
+	}
+
+	// Write out the bytes
+	i.w.Write(buf.Bytes())
+}
diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go
new file mode 100755
index 0000000..b818e41
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/metrics.go
@@ -0,0 +1,115 @@
+package metrics
+
+import (
+	"runtime"
+	"time"
+)
+
+func (m *Metrics) SetGauge(key []string, val float32) {
+	if m.HostName != "" && m.EnableHostname {
+		key = insert(0, m.HostName, key)
+	}
+	if m.EnableTypePrefix {
+		key = insert(0, "gauge", key)
+	}
+	if m.ServiceName != "" {
+		key = insert(0, m.ServiceName, key)
+	}
+	m.sink.SetGauge(key, val)
+}
+
+func (m *Metrics) EmitKey(key []string, val float32) {
+	if m.EnableTypePrefix {
+		key = insert(0, "kv", key)
+	}
+	if m.ServiceName != "" {
+		key = insert(0, m.ServiceName, key)
+	}
+	m.sink.EmitKey(key, val)
+}
+
+func (m *Metrics) IncrCounter(key []string, val float32) {
+	if m.EnableTypePrefix {
+		key = insert(0, "counter", key)
+	}
+	if m.ServiceName != "" {
+		key = insert(0, m.ServiceName, key)
+	}
+	m.sink.IncrCounter(key, val)
+}
+
+func (m *Metrics) AddSample(key []string, val float32) {
+	if m.EnableTypePrefix {
+		key = insert(0, "sample", key)
+	}
+	if m.ServiceName != "" {
+		key = insert(0, m.ServiceName, key)
+	}
+	m.sink.AddSample(key, val)
+}
+
+func (m *Metrics) MeasureSince(key []string, start time.Time) {
+	if m.EnableTypePrefix {
+		key = insert(0, "timer", key)
+	}
+	if m.ServiceName != "" {
+		key = insert(0, m.ServiceName, key)
+	}
+	now := time.Now()
+	elapsed := now.Sub(start)
+	msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
+	m.sink.AddSample(key, msec)
+}
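+
+// Example (a sketch; doWork is a hypothetical workload): timing a call with
+// MeasureSince using the configured TimerGranularity (milliseconds by default).
+//
+//	start := time.Now()
+//	doWork()
+//	m.MeasureSince([]string{"work", "latency"}, start)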
+
+// Periodically collects runtime stats to publish
+func (m *Metrics) collectStats() {
+	for {
+		time.Sleep(m.ProfileInterval)
+		m.emitRuntimeStats()
+	}
+}
+
+// Emits various runtime statistics
+func (m *Metrics) emitRuntimeStats() {
+	// Export number of Goroutines
+	numRoutines := runtime.NumGoroutine()
+	m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
+
+	// Export memory stats
+	var stats runtime.MemStats
+	runtime.ReadMemStats(&stats)
+	m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
+	m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
+	m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
+	m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
+	m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
+	m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
+	m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
+
+	// Export info about the last few GC runs
+	num := stats.NumGC
+
+	// Handle wrap around
+	if num < m.lastNumGC {
+		m.lastNumGC = 0
+	}
+
+	// Ensure we don't scan more than 256
+	if num-m.lastNumGC >= 256 {
+		m.lastNumGC = num - 255
+	}
+
+	for i := m.lastNumGC; i < num; i++ {
+		pause := stats.PauseNs[i%256]
+		m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
+	}
+	m.lastNumGC = num
+}
+
+// Inserts a string value at an index into the slice
+func insert(i int, v string, s []string) []string {
+	s = append(s, "")
+	copy(s[i+1:], s[i:])
+	s[i] = v
+	return s
+}
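+
+// For example, insert(0, "svc", []string{"api", "latency"}) returns
+// []string{"svc", "api", "latency"}. Note that the backing array of s may be
+// mutated in the process.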
diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go
new file mode 100755
index 0000000..0c240c2
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/sink.go
@@ -0,0 +1,52 @@
+package metrics
+
+// The MetricSink interface is used to transmit metrics information
+// to an external system
+type MetricSink interface {
+	// A Gauge should retain the last value it is set to
+	SetGauge(key []string, val float32)
+
+	// Should emit a Key/Value pair for each call
+	EmitKey(key []string, val float32)
+
+	// Counters should accumulate values
+	IncrCounter(key []string, val float32)
+
+	// Samples are for timing information, where quantiles are used
+	AddSample(key []string, val float32)
+}
+
+// BlackholeSink discards all messages it receives
+type BlackholeSink struct{}
+
+func (*BlackholeSink) SetGauge(key []string, val float32)    {}
+func (*BlackholeSink) EmitKey(key []string, val float32)     {}
+func (*BlackholeSink) IncrCounter(key []string, val float32) {}
+func (*BlackholeSink) AddSample(key []string, val float32)   {}
+
+// FanoutSink is used to fan out values to multiple sinks
+type FanoutSink []MetricSink
+
+func (fh FanoutSink) SetGauge(key []string, val float32) {
+	for _, s := range fh {
+		s.SetGauge(key, val)
+	}
+}
+
+func (fh FanoutSink) EmitKey(key []string, val float32) {
+	for _, s := range fh {
+		s.EmitKey(key, val)
+	}
+}
+
+func (fh FanoutSink) IncrCounter(key []string, val float32) {
+	for _, s := range fh {
+		s.IncrCounter(key, val)
+	}
+}
+
+func (fh FanoutSink) AddSample(key []string, val float32) {
+	for _, s := range fh {
+		s.AddSample(key, val)
+	}
+}
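+
+// Example (a sketch): fan the same values out to a statsite server and a
+// blackhole at once.
+//
+//	statsite, _ := NewStatsiteSink("127.0.0.1:8125")
+//	fanout := FanoutSink{statsite, &BlackholeSink{}}
+//	fanout.SetGauge([]string{"queue", "depth"}, 42)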
diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go
new file mode 100755
index 0000000..44113f1
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/start.go
@@ -0,0 +1,95 @@
+package metrics
+
+import (
+	"os"
+	"time"
+)
+
+// Config is used to configure metrics settings
+type Config struct {
+	ServiceName          string        // Prefixed to keys to separate services
+	HostName             string        // Hostname to use. If not provided and EnableHostname is true, os.Hostname is used
+	EnableHostname       bool          // Enable prefixing gauge values with hostname
+	EnableRuntimeMetrics bool          // Enables profiling of runtime metrics (GC, Goroutines, Memory)
+	EnableTypePrefix     bool          // Prefixes key with a type ("counter", "gauge", "timer")
+	TimerGranularity     time.Duration // Granularity of timers.
+	ProfileInterval      time.Duration // Interval to profile runtime metrics
+}
+
+// Metrics represents an instance of a metrics sink that can
+// be used to emit metrics
+type Metrics struct {
+	Config
+	lastNumGC uint32
+	sink      MetricSink
+}
+
+// Shared global metrics instance
+var globalMetrics *Metrics
+
+func init() {
+	// Initialize to a blackhole sink to avoid errors
+	globalMetrics = &Metrics{sink: &BlackholeSink{}}
+}
+
+// DefaultConfig provides a sane default configuration
+func DefaultConfig(serviceName string) *Config {
+	c := &Config{
+		ServiceName:          serviceName, // Use client provided service
+		HostName:             "",
+		EnableHostname:       true,             // Enable hostname prefix
+		EnableRuntimeMetrics: true,             // Enable runtime profiling
+		EnableTypePrefix:     false,            // Disable type prefix
+		TimerGranularity:     time.Millisecond, // Timers are in milliseconds
+		ProfileInterval:      time.Second,      // Poll runtime every second
+	}
+
+	// Try to get the hostname
+	name, _ := os.Hostname()
+	c.HostName = name
+	return c
+}
+
+// New is used to create a new instance of Metrics
+func New(conf *Config, sink MetricSink) (*Metrics, error) {
+	met := &Metrics{}
+	met.Config = *conf
+	met.sink = sink
+
+	// Start the runtime collector
+	if conf.EnableRuntimeMetrics {
+		go met.collectStats()
+	}
+	return met, nil
+}
+
+// NewGlobal is the same as New, but it assigns the metrics object to be
+// used globally as well as returning it.
+func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
+	metrics, err := New(conf, sink)
+	if err == nil {
+		globalMetrics = metrics
+	}
+	return metrics, err
+}
+
+// Proxy all the methods to the globalMetrics instance
+func SetGauge(key []string, val float32) {
+	globalMetrics.SetGauge(key, val)
+}
+
+func EmitKey(key []string, val float32) {
+	globalMetrics.EmitKey(key, val)
+}
+
+func IncrCounter(key []string, val float32) {
+	globalMetrics.IncrCounter(key, val)
+}
+
+func AddSample(key []string, val float32) {
+	globalMetrics.AddSample(key, val)
+}
+
+func MeasureSince(key []string, start time.Time) {
+	globalMetrics.MeasureSince(key, start)
+}
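+
+// Example (a sketch): wire up a global instance once at startup, then call the
+// package-level helpers from anywhere.
+//
+//	sink, _ := NewStatsdSink("127.0.0.1:8125")
+//	_, _ = NewGlobal(DefaultConfig("my-service"), sink)
+//	IncrCounter([]string{"requests"}, 1)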
diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go
new file mode 100644
index 0000000..65a5021
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsd.go
@@ -0,0 +1,154 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"strings"
+	"time"
+)
+
+const (
+	// statsdMaxLen is the maximum size of a packet
+	// to send to statsd
+	statsdMaxLen = 1400
+)
+
+// StatsdSink provides a MetricSink that can be used
+// with a statsite or statsd metrics server. It uses
+// only UDP packets, while StatsiteSink uses TCP.
+type StatsdSink struct {
+	addr        string
+	metricQueue chan string
+}
+
+// NewStatsdSink is used to create a new StatsdSink
+func NewStatsdSink(addr string) (*StatsdSink, error) {
+	s := &StatsdSink{
+		addr:        addr,
+		metricQueue: make(chan string, 4096),
+	}
+	go s.flushMetrics()
+	return s, nil
+}
+
+// Shutdown is used to stop flushing to statsd
+func (s *StatsdSink) Shutdown() {
+	close(s.metricQueue)
+}
+
+func (s *StatsdSink) SetGauge(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsdSink) EmitKey(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
+}
+
+func (s *StatsdSink) IncrCounter(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsdSink) AddSample(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+// Flattens the key for formatting; replaces spaces and colons with underscores
+func (s *StatsdSink) flattenKey(parts []string) string {
+	joined := strings.Join(parts, ".")
+	return strings.Map(func(r rune) rune {
+		switch r {
+		case ':':
+			fallthrough
+		case ' ':
+			return '_'
+		default:
+			return r
+		}
+	}, joined)
+}
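+
+// For example, flattenKey([]string{"my svc", "api:latency"}) returns
+// "my_svc.api_latency".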
+
+// Does a non-blocking push to the metrics queue; the metric is dropped if the queue is full
+func (s *StatsdSink) pushMetric(m string) {
+	select {
+	case s.metricQueue <- m:
+	default:
+	}
+}
+
+// Flushes metrics
+func (s *StatsdSink) flushMetrics() {
+	var sock net.Conn
+	var err error
+	var wait <-chan time.Time
+	ticker := time.NewTicker(flushInterval)
+	defer ticker.Stop()
+
+CONNECT:
+	// Create a buffer
+	buf := bytes.NewBuffer(nil)
+
+	// Attempt to connect
+	sock, err = net.Dial("udp", s.addr)
+	if err != nil {
+		log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
+		goto WAIT
+	}
+
+	for {
+		select {
+		case metric, ok := <-s.metricQueue:
+			// Get a metric from the queue
+			if !ok {
+				goto QUIT
+			}
+
+			// Check if this would overflow the packet size
+			if len(metric)+buf.Len() > statsdMaxLen {
+				_, err := sock.Write(buf.Bytes())
+				buf.Reset()
+				if err != nil {
+					log.Printf("[ERR] Error writing to statsd! Err: %s", err)
+					goto WAIT
+				}
+			}
+
+			// Append to the buffer
+			buf.WriteString(metric)
+
+		case <-ticker.C:
+			if buf.Len() == 0 {
+				continue
+			}
+
+			_, err := sock.Write(buf.Bytes())
+			buf.Reset()
+			if err != nil {
+				log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
+				goto WAIT
+			}
+		}
+	}
+
+WAIT:
+	// Wait for a while
+	wait = time.After(time.Duration(5) * time.Second)
+	for {
+		select {
+		// Dequeue the messages to avoid backlog
+		case _, ok := <-s.metricQueue:
+			if !ok {
+				goto QUIT
+			}
+		case <-wait:
+			goto CONNECT
+		}
+	}
+QUIT:
+	s.metricQueue = nil
+}
diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go
new file mode 100755
index 0000000..6873013
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsite.go
@@ -0,0 +1,142 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"strings"
+	"time"
+)
+
+const (
+	// We force flush the statsite metrics after this period of
+	// inactivity. Prevents stats from getting stuck in a buffer
+	// forever.
+	flushInterval = 100 * time.Millisecond
+)
+
+// StatsiteSink provides a MetricSink that can be used with a
+// statsite metrics server
+type StatsiteSink struct {
+	addr        string
+	metricQueue chan string
+}
+
+// NewStatsiteSink is used to create a new StatsiteSink
+func NewStatsiteSink(addr string) (*StatsiteSink, error) {
+	s := &StatsiteSink{
+		addr:        addr,
+		metricQueue: make(chan string, 4096),
+	}
+	go s.flushMetrics()
+	return s, nil
+}
+
+// Shutdown is used to stop flushing to statsite
+func (s *StatsiteSink) Shutdown() {
+	close(s.metricQueue)
+}
+
+func (s *StatsiteSink) SetGauge(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsiteSink) EmitKey(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
+}
+
+func (s *StatsiteSink) IncrCounter(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsiteSink) AddSample(key []string, val float32) {
+	flatKey := s.flattenKey(key)
+	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+// Flattens the key for formatting; replaces spaces and colons with underscores
+func (s *StatsiteSink) flattenKey(parts []string) string {
+	joined := strings.Join(parts, ".")
+	return strings.Map(func(r rune) rune {
+		switch r {
+		case ':':
+			fallthrough
+		case ' ':
+			return '_'
+		default:
+			return r
+		}
+	}, joined)
+}
+
+// Does a non-blocking push to the metrics queue; the metric is dropped if the queue is full
+func (s *StatsiteSink) pushMetric(m string) {
+	select {
+	case s.metricQueue <- m:
+	default:
+	}
+}
+
+// Flushes metrics
+func (s *StatsiteSink) flushMetrics() {
+	var sock net.Conn
+	var err error
+	var wait <-chan time.Time
+	var buffered *bufio.Writer
+	ticker := time.NewTicker(flushInterval)
+	defer ticker.Stop()
+
+CONNECT:
+	// Attempt to connect
+	sock, err = net.Dial("tcp", s.addr)
+	if err != nil {
+		log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
+		goto WAIT
+	}
+
+	// Create a buffered writer
+	buffered = bufio.NewWriter(sock)
+
+	for {
+		select {
+		case metric, ok := <-s.metricQueue:
+			// Get a metric from the queue
+			if !ok {
+				goto QUIT
+			}
+
+			// Try to send to statsite
+			_, err := buffered.Write([]byte(metric))
+			if err != nil {
+				log.Printf("[ERR] Error writing to statsite! Err: %s", err)
+				goto WAIT
+			}
+		case <-ticker.C:
+			if err := buffered.Flush(); err != nil {
+				log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
+				goto WAIT
+			}
+		}
+	}
+
+WAIT:
+	// Wait for a while
+	wait = time.After(time.Duration(5) * time.Second)
+	for {
+		select {
+		// Dequeue the messages to avoid backlog
+		case _, ok := <-s.metricQueue:
+			if !ok {
+				goto QUIT
+			}
+		case <-wait:
+			goto CONNECT
+		}
+	}
+QUIT:
+	s.metricQueue = nil
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore b/vendor/github.com/bsm/sarama-cluster/.gitignore
new file mode 100644
index 0000000..88113c5
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/.gitignore
@@ -0,0 +1,4 @@
+*.log
+*.pid
+kafka*/
+vendor/
diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml
new file mode 100644
index 0000000..07c7c10
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/.travis.yml
@@ -0,0 +1,18 @@
+sudo: false
+language: go
+go:
+  - 1.10.x
+  - 1.9.x
+install:
+  - go get -u github.com/golang/dep/cmd/dep
+  - dep ensure
+env:
+  - SCALA_VERSION=2.12 KAFKA_VERSION=0.11.0.1
+  - SCALA_VERSION=2.12 KAFKA_VERSION=1.0.1
+  - SCALA_VERSION=2.12 KAFKA_VERSION=1.1.0
+script:
+  - make default test-race
+addons:
+  apt:
+    packages:
+      - oracle-java8-set-default
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
new file mode 100644
index 0000000..e1bc110
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
@@ -0,0 +1,151 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/Shopify/sarama"
+  packages = ["."]
+  revision = "35324cf48e33d8260e1c7c18854465a904ade249"
+  version = "v1.17.0"
+
+[[projects]]
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+  version = "v1.1.0"
+
+[[projects]]
+  name = "github.com/eapache/go-resiliency"
+  packages = ["breaker"]
+  revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/eapache/go-xerial-snappy"
+  packages = ["."]
+  revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
+
+[[projects]]
+  name = "github.com/eapache/queue"
+  packages = ["."]
+  revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
+  version = "v1.1.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/golang/snappy"
+  packages = ["."]
+  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
+
+[[projects]]
+  name = "github.com/onsi/ginkgo"
+  packages = [
+    ".",
+    "config",
+    "extensions/table",
+    "internal/codelocation",
+    "internal/containernode",
+    "internal/failer",
+    "internal/leafnodes",
+    "internal/remote",
+    "internal/spec",
+    "internal/spec_iterator",
+    "internal/specrunner",
+    "internal/suite",
+    "internal/testingtproxy",
+    "internal/writer",
+    "reporters",
+    "reporters/stenographer",
+    "reporters/stenographer/support/go-colorable",
+    "reporters/stenographer/support/go-isatty",
+    "types"
+  ]
+  revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf"
+  version = "v1.5.0"
+
+[[projects]]
+  name = "github.com/onsi/gomega"
+  packages = [
+    ".",
+    "format",
+    "internal/assertion",
+    "internal/asyncassertion",
+    "internal/oraclematcher",
+    "internal/testingtsupport",
+    "matchers",
+    "matchers/support/goraph/bipartitegraph",
+    "matchers/support/goraph/edge",
+    "matchers/support/goraph/node",
+    "matchers/support/goraph/util",
+    "types"
+  ]
+  revision = "62bff4df71bdbc266561a0caee19f0594b17c240"
+  version = "v1.4.0"
+
+[[projects]]
+  name = "github.com/pierrec/lz4"
+  packages = [
+    ".",
+    "internal/xxh32"
+  ]
+  revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
+  version = "v2.0.2"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/rcrowley/go-metrics"
+  packages = ["."]
+  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = [
+    "html",
+    "html/atom",
+    "html/charset"
+  ]
+  revision = "afe8f62b1d6bbd81f31868121a50b06d8188e1f9"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/sys"
+  packages = ["unix"]
+  revision = "63fc586f45fe72d95d5240a5d5eb95e6503907d3"
+
+[[projects]]
+  name = "golang.org/x/text"
+  packages = [
+    "encoding",
+    "encoding/charmap",
+    "encoding/htmlindex",
+    "encoding/internal",
+    "encoding/internal/identifier",
+    "encoding/japanese",
+    "encoding/korean",
+    "encoding/simplifiedchinese",
+    "encoding/traditionalchinese",
+    "encoding/unicode",
+    "internal/gen",
+    "internal/tag",
+    "internal/utf8internal",
+    "language",
+    "runes",
+    "transform",
+    "unicode/cldr"
+  ]
+  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+  version = "v0.3.0"
+
+[[projects]]
+  name = "gopkg.in/yaml.v2"
+  packages = ["."]
+  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+  version = "v2.2.1"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "2fa33a2d1ae87e0905ef09332bb4b3fda29179f6bcd48fd3b94070774b9e458b"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
new file mode 100644
index 0000000..1eecfef
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
@@ -0,0 +1,26 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#  name = "github.com/x/y"
+#  version = "2.4.0"
+
+
+[[constraint]]
+  name = "github.com/Shopify/sarama"
+  version = "^1.14.0"
diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE
new file mode 100644
index 0000000..127751c
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/LICENSE
@@ -0,0 +1,22 @@
+(The MIT License)
+
+Copyright (c) 2017 Black Square Media Ltd
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+'Software'), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile
new file mode 100644
index 0000000..25c5bc2
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/Makefile
@@ -0,0 +1,35 @@
+SCALA_VERSION?= 2.12
+KAFKA_VERSION?= 1.1.0
+KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION)
+KAFKA_SRC= https://archive.apache.org/dist/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz
+KAFKA_ROOT= testdata/$(KAFKA_DIR)
+PKG=$(shell go list ./... | grep -v vendor)
+
+default: vet test
+
+vet:
+	go vet $(PKG)
+
+test: testdeps
+	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60
+
+test-verbose: testdeps
+	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v
+
+test-race: testdeps
+	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race
+
+testdeps: $(KAFKA_ROOT)
+
+doc: README.md
+
+.PHONY: test testdeps vet doc
+
+# ---------------------------------------------------------------------
+
+$(KAFKA_ROOT):
+	@mkdir -p $(dir $@)
+	cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz
+
+README.md: README.md.tpl $(wildcard *.go)
+	becca -package $(subst $(GOPATH)/src/,,$(PWD))
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md
new file mode 100644
index 0000000..ebcd755
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/README.md
@@ -0,0 +1,151 @@
+# Sarama Cluster
+
+[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
+[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
+[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+
+Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
+
+## Documentation
+
+Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
+
+## Examples
+
+Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
+topics and partitions are all passed to a single channel:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/signal"
+
+	cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {
+
+	// init (custom) config, enable errors and notifications
+	config := cluster.NewConfig()
+	config.Consumer.Return.Errors = true
+	config.Group.Return.Notifications = true
+
+	// init consumer
+	brokers := []string{"127.0.0.1:9092"}
+	topics := []string{"my_topic", "other_topic"}
+	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
+	if err != nil {
+		panic(err)
+	}
+	defer consumer.Close()
+
+	// trap SIGINT to trigger a shutdown.
+	signals := make(chan os.Signal, 1)
+	signal.Notify(signals, os.Interrupt)
+
+	// consume errors
+	go func() {
+		for err := range consumer.Errors() {
+			log.Printf("Error: %s\n", err.Error())
+		}
+	}()
+
+	// consume notifications
+	go func() {
+		for ntf := range consumer.Notifications() {
+			log.Printf("Rebalanced: %+v\n", ntf)
+		}
+	}()
+
+	// consume messages, watch signals
+	for {
+		select {
+		case msg, ok := <-consumer.Messages():
+			if ok {
+				fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
+				consumer.MarkOffset(msg, "")	// mark message as processed
+			}
+		case <-signals:
+			return
+		}
+	}
+}
+```
+
+Users who require access to individual partitions can use the partitioned mode, which exposes partition-level
+consumers:
+
+```go
+package main
+
+import (
+  "fmt"
+  "log"
+  "os"
+  "os/signal"
+
+  cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {
+
+	// init (custom) config, set mode to ConsumerModePartitions
+	config := cluster.NewConfig()
+	config.Group.Mode = cluster.ConsumerModePartitions
+
+	// init consumer
+	brokers := []string{"127.0.0.1:9092"}
+	topics := []string{"my_topic", "other_topic"}
+	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
+	if err != nil {
+		panic(err)
+	}
+	defer consumer.Close()
+
+	// trap SIGINT to trigger a shutdown.
+	signals := make(chan os.Signal, 1)
+	signal.Notify(signals, os.Interrupt)
+
+	// consume partitions
+	for {
+		select {
+		case part, ok := <-consumer.Partitions():
+			if !ok {
+				return
+			}
+
+			// start a separate goroutine to consume messages
+			go func(pc cluster.PartitionConsumer) {
+				for msg := range pc.Messages() {
+					fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
+					consumer.MarkOffset(msg, "")	// mark message as processed
+				}
+			}(part)
+		case <-signals:
+			return
+		}
+	}
+}
+```
+
+## Running tests
+
+You need to install Ginkgo & Gomega to run tests. Please see
+http://onsi.github.io/ginkgo for more details.
+
+To run tests, call:
+
+	$ make test
+
+## Troubleshooting
+
+### Consumer not receiving any messages?
+
+By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that a brand new consumer which has never committed any offsets to kafka will only receive messages produced after it starts consuming.
+
+If you wish to receive all messages (from the beginning of the topic) when a consumer does not have any offsets committed to kafka, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
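+
+For example (a minimal sketch):
+
+```go
+config := cluster.NewConfig()
+config.Consumer.Offsets.Initial = sarama.OffsetOldest
+```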
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
new file mode 100644
index 0000000..5f63a69
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
@@ -0,0 +1,67 @@
+# Sarama Cluster
+
+[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
+[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
+[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
+[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+
+Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
+
+## Documentation
+
+Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
+
+## Examples
+
+Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
+topics and partitions are all passed to a single channel:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/signal"
+
+	cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {{ "ExampleConsumer" | code }}
+```
+
+Users who require access to individual partitions can use the partitioned mode, which exposes partition-level
+consumers:
+
+```go
+package main
+
+import (
+  "fmt"
+  "log"
+  "os"
+  "os/signal"
+
+  cluster "github.com/bsm/sarama-cluster"
+)
+
+func main() {{ "ExampleConsumer_Partitions" | code }}
+```
+
+## Running tests
+
+You need to install Ginkgo & Gomega to run tests. Please see
+http://onsi.github.io/ginkgo for more details.
+
+To run tests, call:
+
+	$ make test
+
+## Troubleshooting
+
+### Consumer not receiving any messages?
+
+By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that a brand new consumer which has never committed any offsets to kafka will only receive messages produced after it starts consuming.
+
+If you wish to receive all messages (from the beginning of the topic) when a consumer does not have any offsets committed to kafka, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go
new file mode 100644
index 0000000..3aeaece
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/balancer.go
@@ -0,0 +1,170 @@
+package cluster
+
+import (
+	"math"
+	"sort"
+
+	"github.com/Shopify/sarama"
+)
+
+// NotificationType defines the type of notification
+type NotificationType uint8
+
+// String describes the notification type
+func (t NotificationType) String() string {
+	switch t {
+	case RebalanceStart:
+		return "rebalance start"
+	case RebalanceOK:
+		return "rebalance OK"
+	case RebalanceError:
+		return "rebalance error"
+	}
+	return "unknown"
+}
+
+const (
+	UnknownNotification NotificationType = iota
+	RebalanceStart
+	RebalanceOK
+	RebalanceError
+)
+
+// Notification instances are state events emitted by the consumers on rebalance
+type Notification struct {
+	// Type exposes the notification type
+	Type NotificationType
+
+	// Claimed contains topic/partitions that were claimed by this rebalance cycle
+	Claimed map[string][]int32
+
+	// Released contains topic/partitions that were released as part of this rebalance cycle
+	Released map[string][]int32
+
+	// Current contains topic/partitions that are currently claimed by the consumer
+	Current map[string][]int32
+}
+
+func newNotification(current map[string][]int32) *Notification {
+	return &Notification{
+		Type:    RebalanceStart,
+		Current: current,
+	}
+}
+
+func (n *Notification) success(current map[string][]int32) *Notification {
+	o := &Notification{
+		Type:     RebalanceOK,
+		Claimed:  make(map[string][]int32),
+		Released: make(map[string][]int32),
+		Current:  current,
+	}
+	for topic, partitions := range current {
+		o.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(n.Current[topic]))
+	}
+	for topic, partitions := range n.Current {
+		o.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic]))
+	}
+	return o
+}
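+
+// For example, if the consumer previously held topic t0 partitions [0 1] and
+// now holds t0 partitions [1 2], the success notification reports Claimed
+// t0:[2] and Released t0:[0].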
+
+// --------------------------------------------------------------------
+
+type topicInfo struct {
+	Partitions []int32
+	MemberIDs  []string
+}
+
+func (info topicInfo) Perform(s Strategy) map[string][]int32 {
+	if s == StrategyRoundRobin {
+		return info.RoundRobin()
+	}
+	return info.Ranges()
+}
+
+func (info topicInfo) Ranges() map[string][]int32 {
+	sort.Strings(info.MemberIDs)
+
+	mlen := len(info.MemberIDs)
+	plen := len(info.Partitions)
+	res := make(map[string][]int32, mlen)
+
+	for pos, memberID := range info.MemberIDs {
+		n, i := float64(plen)/float64(mlen), float64(pos)
+		min := int(math.Floor(i*n + 0.5))
+		max := int(math.Floor((i+1)*n + 0.5))
+		sub := info.Partitions[min:max]
+		if len(sub) > 0 {
+			res[memberID] = sub
+		}
+	}
+	return res
+}
+
+func (info topicInfo) RoundRobin() map[string][]int32 {
+	sort.Strings(info.MemberIDs)
+
+	mlen := len(info.MemberIDs)
+	res := make(map[string][]int32, mlen)
+	for i, pnum := range info.Partitions {
+		memberID := info.MemberIDs[i%mlen]
+		res[memberID] = append(res[memberID], pnum)
+	}
+	return res
+}
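+
+// Example (illustrative): six partitions of one topic assigned to members m1
+// and m2.
+//
+//	Ranges():     m1 → [0 1 2], m2 → [3 4 5]
+//	RoundRobin(): m1 → [0 2 4], m2 → [1 3 5]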
+
+// --------------------------------------------------------------------
+
+type balancer struct {
+	client sarama.Client
+	topics map[string]topicInfo
+}
+
+func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) {
+	balancer := newBalancer(client)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			if err := balancer.Topic(topic, memberID); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return balancer, nil
+}
+
+func newBalancer(client sarama.Client) *balancer {
+	return &balancer{
+		client: client,
+		topics: make(map[string]topicInfo),
+	}
+}
+
+func (r *balancer) Topic(name string, memberID string) error {
+	topic, ok := r.topics[name]
+	if !ok {
+		nums, err := r.client.Partitions(name)
+		if err != nil {
+			return err
+		}
+		topic = topicInfo{
+			Partitions: nums,
+			MemberIDs:  make([]string, 0, 1),
+		}
+	}
+	topic.MemberIDs = append(topic.MemberIDs, memberID)
+	r.topics[name] = topic
+	return nil
+}
+
+func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 {
+	res := make(map[string]map[string][]int32, 1)
+	for topic, info := range r.topics {
+		for memberID, partitions := range info.Perform(s) {
+			if _, ok := res[memberID]; !ok {
+				res[memberID] = make(map[string][]int32, 1)
+			}
+			res[memberID][topic] = partitions
+		}
+	}
+	return res
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go
new file mode 100644
index 0000000..42ffb30
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/client.go
@@ -0,0 +1,50 @@
+package cluster
+
+import (
+	"errors"
+	"sync/atomic"
+
+	"github.com/Shopify/sarama"
+)
+
+var errClientInUse = errors.New("cluster: client is already used by another consumer")
+
+// Client is a group client
+type Client struct {
+	sarama.Client
+	config Config
+
+	inUse uint32
+}
+
+// NewClient creates a new client instance
+func NewClient(addrs []string, config *Config) (*Client, error) {
+	if config == nil {
+		config = NewConfig()
+	}
+
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	client, err := sarama.NewClient(addrs, &config.Config)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Client{Client: client, config: *config}, nil
+}
+
+// ClusterConfig returns the cluster configuration.
+func (c *Client) ClusterConfig() *Config {
+	cfg := c.config
+	return &cfg
+}
+
+func (c *Client) claim() bool {
+	return atomic.CompareAndSwapUint32(&c.inUse, 0, 1)
+}
+
+func (c *Client) release() {
+	atomic.CompareAndSwapUint32(&c.inUse, 1, 0)
+}
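+
+// Example (a sketch): a Client may back only one Consumer at a time, so close
+// the first consumer before reusing the client.
+//
+//	client, _ := NewClient([]string{"127.0.0.1:9092"}, nil)
+//	c1, _ := NewConsumerFromClient(client, "group", []string{"topic"})
+//	_ = c1.Close()
+//	c2, _ := NewConsumerFromClient(client, "group", []string{"topic"})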
diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go
new file mode 100644
index 0000000..adcf0e9
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/cluster.go
@@ -0,0 +1,25 @@
+package cluster
+
+// Strategy for partition-to-consumer assignment
+type Strategy string
+
+const (
+	// StrategyRange is the default and assigns partition ranges to consumers.
+	// Example with six partitions and two consumers:
+	//   C1: [0, 1, 2]
+	//   C2: [3, 4, 5]
+	StrategyRange Strategy = "range"
+
+	// StrategyRoundRobin assigns partitions by alternating over consumers.
+	// Example with six partitions and two consumers:
+	//   C1: [0, 2, 4]
+	//   C2: [1, 3, 5]
+	StrategyRoundRobin Strategy = "roundrobin"
+)
+
+// Error instances are wrappers for internal errors with a context and
+// may be returned through the consumer's Errors() channel
+type Error struct {
+	Ctx string
+	error
+}
diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go
new file mode 100644
index 0000000..084b835
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/config.go
@@ -0,0 +1,146 @@
+package cluster
+
+import (
+	"regexp"
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+var minVersion = sarama.V0_9_0_0
+
+type ConsumerMode uint8
+
+const (
+	ConsumerModeMultiplex ConsumerMode = iota
+	ConsumerModePartitions
+)
+
+// Config extends sarama.Config with a Group-specific namespace
+type Config struct {
+	sarama.Config
+
+	// Group is the namespace for group management properties
+	Group struct {
+
+		// The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange)
+		PartitionStrategy Strategy
+
+		// By default, messages and errors from the subscribed topics and partitions are all multiplexed and
+		// made available through the consumer's Messages() and Errors() channels.
+		//
+		// Users who require low-level access can enable ConsumerModePartitions where individual partitions
+		// are exposed on the Partitions() channel. Messages and errors must then be consumed on the partitions
+		// themselves.
+		Mode ConsumerMode
+
+		Offsets struct {
+			Retry struct {
+				// The number of retries when committing offsets (defaults to 3).
+				Max int
+			}
+			Synchronization struct {
+				// The duration allowed for other clients to commit their offsets before resumption in this client, e.g. during a rebalance.
+				// NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration.
+				DwellTime time.Duration
+			}
+		}
+
+		Session struct {
+			// The allowed session timeout for registered consumers (defaults to 30s).
+			// Must be within the allowed server range.
+			Timeout time.Duration
+		}
+
+		Heartbeat struct {
+			// Interval between each heartbeat (defaults to 3s). It should be no more
+			// than 1/3rd of the Group.Session.Timeout setting
+			Interval time.Duration
+		}
+
+		// Return specifies which group channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock.
+		Return struct {
+			// If enabled, rebalance notification will be returned on the
+			// Notifications channel (default disabled).
+			Notifications bool
+		}
+
+		Topics struct {
+			// An additional whitelist of topics to subscribe to.
+			Whitelist *regexp.Regexp
+			// An additional blacklist of topics to avoid. If set, this will take precedence over
+			// the Whitelist setting.
+			Blacklist *regexp.Regexp
+		}
+
+		Member struct {
+			// Custom metadata to include when joining the group. The user data for all joined members
+			// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+			// coordinator for the group.
+			UserData []byte
+		}
+	}
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{
+		Config: *sarama.NewConfig(),
+	}
+	c.Group.PartitionStrategy = StrategyRange
+	c.Group.Offsets.Retry.Max = 3
+	c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime
+	c.Group.Session.Timeout = 30 * time.Second
+	c.Group.Heartbeat.Interval = 3 * time.Second
+	c.Config.Version = minVersion
+	return c
+}
+
+// Validate checks a Config instance. It will return a
+// sarama.ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+	if c.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Group.Session.Timeout%time.Millisecond != 0 {
+		sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin {
+		sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.")
+	}
+	if !c.Version.IsAtLeast(minVersion) {
+		sarama.Logger.Println("Version is not supported; 0.9. will be assumed.")
+		c.Version = minVersion
+	}
+	if err := c.Config.Validate(); err != nil {
+		return err
+	}
+
+	// validate the Group values
+	switch {
+	case c.Group.Offsets.Retry.Max < 0:
+		return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0")
+	case c.Group.Offsets.Synchronization.DwellTime <= 0:
+		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0")
+	case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute:
+		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m")
+	case c.Group.Heartbeat.Interval <= 0:
+		return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0")
+	case c.Group.Session.Timeout <= 0:
+		return sarama.ConfigurationError("Group.Session.Timeout must be > 0")
+	case !c.Metadata.Full && c.Group.Topics.Whitelist != nil:
+		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Whitelist is used")
+	case !c.Metadata.Full && c.Group.Topics.Blacklist != nil:
+		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Blacklist is used")
+	}
+
+	// ensure offset is correct
+	switch c.Consumer.Offsets.Initial {
+	case sarama.OffsetOldest, sarama.OffsetNewest:
+	default:
+		return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest")
+	}
+
+	return nil
+}
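+
+// Example (a sketch): tighten the session settings while keeping the
+// heartbeat interval within a third of the session timeout.
+//
+//	cfg := NewConfig()
+//	cfg.Group.Session.Timeout = 20 * time.Second
+//	cfg.Group.Heartbeat.Interval = 6 * time.Second
+//	if err := cfg.Validate(); err != nil {
+//		panic(err)
+//	}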
diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go
new file mode 100644
index 0000000..e7a67da
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/consumer.go
@@ -0,0 +1,919 @@
+package cluster
+
+import (
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+// Consumer is a cluster group consumer
+type Consumer struct {
+	client    *Client
+	ownClient bool
+
+	consumer sarama.Consumer
+	subs     *partitionMap
+
+	consumerID string
+	groupID    string
+
+	memberID     string
+	generationID int32
+	membershipMu sync.RWMutex
+
+	coreTopics  []string
+	extraTopics []string
+
+	dying, dead chan none
+	closeOnce   sync.Once
+
+	consuming     int32
+	messages      chan *sarama.ConsumerMessage
+	errors        chan error
+	partitions    chan PartitionConsumer
+	notifications chan *Notification
+
+	commitMu sync.Mutex
+}
+
+// NewConsumer initializes a new consumer
+func NewConsumer(addrs []string, groupID string, topics []string, config *Config) (*Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	consumer, err := NewConsumerFromClient(client, groupID, topics)
+	if err != nil {
+		return nil, err
+	}
+	consumer.ownClient = true
+	return consumer, nil
+}
+
+// NewConsumerFromClient initializes a new consumer from an existing client.
+//
+// Please note that clients cannot be shared between consumers (due to Kafka internals),
+// they can only be re-used which requires the user to call Close() on the first consumer
+// before using this method again to initialize another one. Attempts to use a client with
+// more than one consumer at a time will return errors.
+func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
+	if !client.claim() {
+		return nil, errClientInUse
+	}
+
+	consumer, err := sarama.NewConsumerFromClient(client.Client)
+	if err != nil {
+		client.release()
+		return nil, err
+	}
+
+	sort.Strings(topics)
+	c := &Consumer{
+		client:   client,
+		consumer: consumer,
+		subs:     newPartitionMap(),
+		groupID:  groupID,
+
+		coreTopics: topics,
+
+		dying: make(chan none),
+		dead:  make(chan none),
+
+		messages:      make(chan *sarama.ConsumerMessage),
+		errors:        make(chan error, client.config.ChannelBufferSize),
+		partitions:    make(chan PartitionConsumer, 1),
+		notifications: make(chan *Notification),
+	}
+	if err := c.client.RefreshCoordinator(groupID); err != nil {
+		client.release()
+		return nil, err
+	}
+
+	go c.mainLoop()
+	return c, nil
+}
+
+// Messages returns the read channel for the messages that are returned by
+// the broker.
+//
+// Messages are only delivered on this channel if the Config.Group.Mode option
+// is set to ConsumerModeMultiplex (the default).
+func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
+
+// Partitions returns the read channels for individual partitions of this broker.
+//
+// Partitions are only delivered on this channel if the Config.Group.Mode
+// option is set to ConsumerModePartitions.
+//
+// The Partitions() channel must be listened to for the life of this consumer;
+// when a rebalance happens old partitions will be closed (naturally come to
+// completion) and new ones will be emitted. The returned channel will only close
+// when the consumer is completely shut down.
+func (c *Consumer) Partitions() <-chan PartitionConsumer { return c.partitions }
+
+// Errors returns a read channel of errors that occur during offset management, if
+// enabled. By default, errors are logged and not returned over this channel. If
+// you want to implement any custom error handling, set your config's
+// Consumer.Return.Errors setting to true, and read from this channel.
+func (c *Consumer) Errors() <-chan error { return c.errors }
+
+// Notifications returns a channel of Notifications that occur during consumer
+// rebalancing. Notifications will only be emitted over this channel if your config's
+// Group.Return.Notifications setting is set to true.
+func (c *Consumer) Notifications() <-chan *Notification { return c.notifications }
+
+// HighWaterMarks returns the current high water marks for each topic and partition.
+// Consistency between partitions is not guaranteed since high water marks are updated separately.
+func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.consumer.HighWaterMarks() }
+
+// MarkOffset marks the provided message as processed, alongside a metadata string
+// that represents the state of the partition consumer at that point in time. The
+// metadata string can be used by another consumer to restore that state, so it
+// can resume consumption.
+//
+// Note: calling MarkOffset does not necessarily commit the offset to the backend
+// store immediately for efficiency reasons, and it may never be committed if
+// your application crashes. This means that you may end up processing the same
+// message twice, and your processing should ideally be idempotent.
+func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+	if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
+		sub.MarkOffset(msg.Offset, metadata)
+	}
+}
+
+// MarkPartitionOffset marks an offset of the provided topic/partition as processed.
+// See MarkOffset for additional explanation.
+func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	if sub := c.subs.Fetch(topic, partition); sub != nil {
+		sub.MarkOffset(offset, metadata)
+	}
+}
+
+// MarkOffsets marks stashed offsets as processed.
+// See MarkOffset for additional explanation.
+func (c *Consumer) MarkOffsets(s *OffsetStash) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for tp, info := range s.offsets {
+		if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
+			sub.MarkOffset(info.Offset, info.Metadata)
+		}
+		delete(s.offsets, tp)
+	}
+}
+
+// ResetOffset marks the provided message as processed, alongside a metadata string
+// that represents the state of the partition consumer at that point in time. The
+// metadata string can be used by another consumer to restore that state, so it
+// can resume consumption.
+//
+// The difference between ResetOffset and MarkOffset is that ResetOffset allows rewinding to an earlier offset.
+func (c *Consumer) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
+	if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
+		sub.ResetOffset(msg.Offset, metadata)
+	}
+}
+
+// ResetPartitionOffset marks an offset of the provided topic/partition as processed.
+// See ResetOffset for additional explanation.
+func (c *Consumer) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	sub := c.subs.Fetch(topic, partition)
+	if sub != nil {
+		sub.ResetOffset(offset, metadata)
+	}
+}
+
+// ResetOffsets marks stashed offsets as processed.
+// See ResetOffset for additional explanation.
+func (c *Consumer) ResetOffsets(s *OffsetStash) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	for tp, info := range s.offsets {
+		if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
+			sub.ResetOffset(info.Offset, info.Metadata)
+		}
+		delete(s.offsets, tp)
+	}
+}
+
+// Subscriptions returns the consumed topics and partitions
+func (c *Consumer) Subscriptions() map[string][]int32 {
+	return c.subs.Info()
+}
+
+// CommitOffsets allows you to manually commit previously marked offsets. By default there is no
+// need to call this function as the consumer will commit offsets automatically
+// using the Config.Consumer.Offsets.CommitInterval setting.
+//
+// Please be aware that calling this function during an internal rebalance cycle may return
+// broker errors (e.g. sarama.ErrUnknownMemberId or sarama.ErrIllegalGeneration).
+func (c *Consumer) CommitOffsets() error {
+	c.commitMu.Lock()
+	defer c.commitMu.Unlock()
+
+	memberID, generationID := c.membership()
+	req := &sarama.OffsetCommitRequest{
+		Version:                 2,
+		ConsumerGroup:           c.groupID,
+		ConsumerGroupGeneration: generationID,
+		ConsumerID:              memberID,
+		RetentionTime:           -1,
+	}
+
+	if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 {
+		req.RetentionTime = int64(ns / time.Millisecond)
+	}
+
+	snap := c.subs.Snapshot()
+	dirty := false
+	for tp, state := range snap {
+		if state.Dirty {
+			dirty = true
+			req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata)
+		}
+	}
+	if !dirty {
+		return nil
+	}
+
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return err
+	}
+
+	resp, err := broker.CommitOffset(req)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return err
+	}
+
+	for topic, errs := range resp.Errors {
+		for partition, kerr := range errs {
+			if kerr != sarama.ErrNoError {
+				err = kerr
+			} else if state, ok := snap[topicPartition{topic, partition}]; ok {
+				if sub := c.subs.Fetch(topic, partition); sub != nil {
+					sub.markCommitted(state.Info.Offset)
+				}
+			}
+		}
+	}
+	return err
+}
+
+// Close safely closes the consumer and releases all resources
+func (c *Consumer) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.dying)
+		<-c.dead
+
+		if e := c.release(); e != nil {
+			err = e
+		}
+		if e := c.consumer.Close(); e != nil {
+			err = e
+		}
+		close(c.messages)
+		close(c.errors)
+
+		if e := c.leaveGroup(); e != nil {
+			err = e
+		}
+		close(c.partitions)
+		close(c.notifications)
+
+		// drain
+		for range c.messages {
+		}
+		for range c.errors {
+		}
+		for p := range c.partitions {
+			_ = p.Close()
+		}
+		for range c.notifications {
+		}
+
+		c.client.release()
+		if c.ownClient {
+			if e := c.client.Close(); e != nil {
+				err = e
+			}
+		}
+	})
+	return
+}
+
+func (c *Consumer) mainLoop() {
+	defer close(c.dead)
+	defer atomic.StoreInt32(&c.consuming, 0)
+
+	for {
+		atomic.StoreInt32(&c.consuming, 0)
+
+		// Check if close was requested
+		select {
+		case <-c.dying:
+			return
+		default:
+		}
+
+		// Start next consume cycle
+		c.nextTick()
+	}
+}
+
+func (c *Consumer) nextTick() {
+	// Remember previous subscriptions
+	var notification *Notification
+	if c.client.config.Group.Return.Notifications {
+		notification = newNotification(c.subs.Info())
+	}
+
+	// Refresh coordinator
+	if err := c.refreshCoordinator(); err != nil {
+		c.rebalanceError(err, nil)
+		return
+	}
+
+	// Release subscriptions
+	if err := c.release(); err != nil {
+		c.rebalanceError(err, nil)
+		return
+	}
+
+	// Issue rebalance start notification
+	if c.client.config.Group.Return.Notifications {
+		c.handleNotification(notification)
+	}
+
+	// Rebalance, fetch new subscriptions
+	subs, err := c.rebalance()
+	if err != nil {
+		c.rebalanceError(err, notification)
+		return
+	}
+
+	// Coordinate loops, make sure everything is
+	// stopped on exit
+	tomb := newLoopTomb()
+	defer tomb.Close()
+
+	// Start the heartbeat
+	tomb.Go(c.hbLoop)
+
+	// Subscribe to topic/partitions
+	if err := c.subscribe(tomb, subs); err != nil {
+		c.rebalanceError(err, notification)
+		return
+	}
+
+	// Update/issue notification with new claims
+	if c.client.config.Group.Return.Notifications {
+		notification = notification.success(subs)
+		c.handleNotification(notification)
+	}
+
+	// Start topic watcher loop
+	tomb.Go(c.twLoop)
+
+	// Start consuming and committing offsets
+	tomb.Go(c.cmLoop)
+	atomic.StoreInt32(&c.consuming, 1)
+
+	// Wait for signals
+	select {
+	case <-tomb.Dying():
+	case <-c.dying:
+	}
+}
+
+// heartbeat loop, triggered by the mainLoop
+func (c *Consumer) hbLoop(stopped <-chan none) {
+	ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			switch err := c.heartbeat(); err {
+			case nil, sarama.ErrNoError:
+			case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress:
+				return
+			default:
+				c.handleError(&Error{Ctx: "heartbeat", error: err})
+				return
+			}
+		case <-stopped:
+			return
+		case <-c.dying:
+			return
+		}
+	}
+}
+
+// topic watcher loop, triggered by the mainLoop
+func (c *Consumer) twLoop(stopped <-chan none) {
+	ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			topics, err := c.client.Topics()
+			if err != nil {
+				c.handleError(&Error{Ctx: "topics", error: err})
+				return
+			}
+
+			for _, topic := range topics {
+				if !c.isKnownCoreTopic(topic) &&
+					!c.isKnownExtraTopic(topic) &&
+					c.isPotentialExtraTopic(topic) {
+					return
+				}
+			}
+		case <-stopped:
+			return
+		case <-c.dying:
+			return
+		}
+	}
+}
+
+// commit loop, triggered by the mainLoop
+func (c *Consumer) cmLoop(stopped <-chan none) {
+	ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil {
+				c.handleError(&Error{Ctx: "commit", error: err})
+				return
+			}
+		case <-stopped:
+			return
+		case <-c.dying:
+			return
+		}
+	}
+}
+
+func (c *Consumer) rebalanceError(err error, n *Notification) {
+	if n != nil {
+		n.Type = RebalanceError
+		c.handleNotification(n)
+	}
+
+	switch err {
+	case sarama.ErrRebalanceInProgress:
+	default:
+		c.handleError(&Error{Ctx: "rebalance", error: err})
+	}
+
+	select {
+	case <-c.dying:
+	case <-time.After(c.client.config.Metadata.Retry.Backoff):
+	}
+}
+
+func (c *Consumer) handleNotification(n *Notification) {
+	if c.client.config.Group.Return.Notifications {
+		select {
+		case c.notifications <- n:
+		case <-c.dying:
+			return
+		}
+	}
+}
+
+func (c *Consumer) handleError(e *Error) {
+	if c.client.config.Consumer.Return.Errors {
+		select {
+		case c.errors <- e:
+		case <-c.dying:
+			return
+		}
+	} else {
+		sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error())
+	}
+}
+
+// Releases the consumer and commits offsets, called from rebalance() and Close()
+func (c *Consumer) release() (err error) {
+	// Stop all consumers
+	c.subs.Stop()
+
+	// Clear subscriptions on exit
+	defer c.subs.Clear()
+
+	// Wait for messages to be processed
+	timeout := time.NewTimer(c.client.config.Group.Offsets.Synchronization.DwellTime)
+	defer timeout.Stop()
+
+	select {
+	case <-c.dying:
+	case <-timeout.C:
+	}
+
+	// Commit offsets, continue on errors
+	if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil {
+		err = e
+	}
+
+	return
+}
+
+// --------------------------------------------------------------------
+
+// Performs a heartbeat, part of the mainLoop()
+func (c *Consumer) heartbeat() error {
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return err
+	}
+
+	memberID, generationID := c.membership()
+	resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	})
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return err
+	}
+	return resp.Err
+}
+
+// Performs a rebalance, part of the mainLoop()
+func (c *Consumer) rebalance() (map[string][]int32, error) {
+	memberID, _ := c.membership()
+	sarama.Logger.Printf("cluster/consumer %s rebalance\n", memberID)
+
+	allTopics, err := c.client.Topics()
+	if err != nil {
+		return nil, err
+	}
+	c.extraTopics = c.selectExtraTopics(allTopics)
+	sort.Strings(c.extraTopics)
+
+	// Re-join consumer group
+	strategy, err := c.joinGroup()
+	switch {
+	case err == sarama.ErrUnknownMemberId:
+		c.membershipMu.Lock()
+		c.memberID = ""
+		c.membershipMu.Unlock()
+		return nil, err
+	case err != nil:
+		return nil, err
+	}
+
+	// Sync consumer group state, fetch subscriptions
+	subs, err := c.syncGroup(strategy)
+	switch {
+	case err == sarama.ErrRebalanceInProgress:
+		return nil, err
+	case err != nil:
+		_ = c.leaveGroup()
+		return nil, err
+	}
+	return subs, nil
+}
+
+// Performs the subscription, part of the mainLoop()
+func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error {
+	// fetch offsets
+	offsets, err := c.fetchOffsets(subs)
+	if err != nil {
+		_ = c.leaveGroup()
+		return err
+	}
+
+	// create consumers in parallel
+	var mu sync.Mutex
+	var wg sync.WaitGroup
+
+	for topic, partitions := range subs {
+		for _, partition := range partitions {
+			wg.Add(1)
+
+			info := offsets[topic][partition]
+			go func(topic string, partition int32) {
+				if e := c.createConsumer(tomb, topic, partition, info); e != nil {
+					mu.Lock()
+					err = e
+					mu.Unlock()
+				}
+				wg.Done()
+			}(topic, partition)
+		}
+	}
+	wg.Wait()
+
+	if err != nil {
+		_ = c.release()
+		_ = c.leaveGroup()
+	}
+	return err
+}
+
+// --------------------------------------------------------------------
+
+// Send a request to the broker to join group on rebalance()
+func (c *Consumer) joinGroup() (*balancer, error) {
+	memberID, _ := c.membership()
+	req := &sarama.JoinGroupRequest{
+		GroupId:        c.groupID,
+		MemberId:       memberID,
+		SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond),
+		ProtocolType:   "consumer",
+	}
+
+	meta := &sarama.ConsumerGroupMemberMetadata{
+		Version:  1,
+		Topics:   append(c.coreTopics, c.extraTopics...),
+		UserData: c.client.config.Group.Member.UserData,
+	}
+	err := req.AddGroupProtocolMetadata(string(StrategyRange), meta)
+	if err != nil {
+		return nil, err
+	}
+	err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta)
+	if err != nil {
+		return nil, err
+	}
+
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	resp, err := broker.JoinGroup(req)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	} else if resp.Err != sarama.ErrNoError {
+		c.closeCoordinator(broker, resp.Err)
+		return nil, resp.Err
+	}
+
+	var strategy *balancer
+	if resp.LeaderId == resp.MemberId {
+		members, err := resp.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		strategy, err = newBalancerFromMeta(c.client, members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	c.membershipMu.Lock()
+	c.memberID = resp.MemberId
+	c.generationID = resp.GenerationId
+	c.membershipMu.Unlock()
+
+	return strategy, nil
+}
+
+// Send a request to the broker to sync the group on rebalance().
+// Returns a list of topics and partitions to consume.
+func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) {
+	memberID, generationID := c.membership()
+	req := &sarama.SyncGroupRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	}
+
+	if strategy != nil {
+		for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) {
+			if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{
+				Topics: topics,
+			}); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	resp, err := broker.SyncGroup(req)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	} else if resp.Err != sarama.ErrNoError {
+		c.closeCoordinator(broker, resp.Err)
+		return nil, resp.Err
+	}
+
+	// Return if there is nothing to subscribe to
+	if len(resp.MemberAssignment) == 0 {
+		return nil, nil
+	}
+
+	// Get assigned subscriptions
+	members, err := resp.GetMemberAssignment()
+	if err != nil {
+		return nil, err
+	}
+
+	// Sort partitions, for each topic
+	for topic := range members.Topics {
+		sort.Sort(int32Slice(members.Topics[topic]))
+	}
+	return members.Topics, nil
+}
+
+// Fetches latest committed offsets for all subscriptions
+func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
+	offsets := make(map[string]map[int32]offsetInfo, len(subs))
+	req := &sarama.OffsetFetchRequest{
+		Version:       1,
+		ConsumerGroup: c.groupID,
+	}
+
+	for topic, partitions := range subs {
+		offsets[topic] = make(map[int32]offsetInfo, len(partitions))
+		for _, partition := range partitions {
+			offsets[topic][partition] = offsetInfo{Offset: -1}
+			req.AddPartition(topic, partition)
+		}
+	}
+
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	resp, err := broker.FetchOffset(req)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return nil, err
+	}
+
+	for topic, partitions := range subs {
+		for _, partition := range partitions {
+			block := resp.GetBlock(topic, partition)
+			if block == nil {
+				return nil, sarama.ErrIncompleteResponse
+			}
+
+			if block.Err == sarama.ErrNoError {
+				offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
+			} else {
+				return nil, block.Err
+			}
+		}
+	}
+	return offsets, nil
+}
+
+// Send a request to the broker to leave the group on failed rebalance() and on Close()
+func (c *Consumer) leaveGroup() error {
+	broker, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		c.closeCoordinator(broker, err)
+		return err
+	}
+
+	memberID, _ := c.membership()
+	if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: memberID,
+	}); err != nil {
+		c.closeCoordinator(broker, err)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------
+
+func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
+	memberID, _ := c.membership()
+	sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))
+
+	// Create partitionConsumer
+	pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
+	if err != nil {
+		return err
+	}
+
+	// Store in subscriptions
+	c.subs.Store(topic, partition, pc)
+
+	// Start partition consumer goroutine
+	tomb.Go(func(stopper <-chan none) {
+		if c.client.config.Group.Mode == ConsumerModePartitions {
+			pc.waitFor(stopper, c.errors)
+		} else {
+			pc.multiplex(stopper, c.messages, c.errors)
+		}
+	})
+
+	if c.client.config.Group.Mode == ConsumerModePartitions {
+		c.partitions <- pc
+	}
+	return nil
+}
+
+func (c *Consumer) commitOffsetsWithRetry(retries int) error {
+	err := c.CommitOffsets()
+	if err != nil && retries > 0 {
+		return c.commitOffsetsWithRetry(retries - 1)
+	}
+	return err
+}
+
+func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) {
+	if broker != nil {
+		_ = broker.Close()
+	}
+
+	switch err {
+	case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer:
+		_ = c.client.RefreshCoordinator(c.groupID)
+	}
+}
+
+func (c *Consumer) selectExtraTopics(allTopics []string) []string {
+	extra := allTopics[:0]
+	for _, topic := range allTopics {
+		if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) {
+			extra = append(extra, topic)
+		}
+	}
+	return extra
+}
+
+func (c *Consumer) isKnownCoreTopic(topic string) bool {
+	pos := sort.SearchStrings(c.coreTopics, topic)
+	return pos < len(c.coreTopics) && c.coreTopics[pos] == topic
+}
+
+func (c *Consumer) isKnownExtraTopic(topic string) bool {
+	pos := sort.SearchStrings(c.extraTopics, topic)
+	return pos < len(c.extraTopics) && c.extraTopics[pos] == topic
+}
+
+func (c *Consumer) isPotentialExtraTopic(topic string) bool {
+	rx := c.client.config.Group.Topics
+	if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) {
+		return false
+	}
+	if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) {
+		return true
+	}
+	return false
+}
+
+func (c *Consumer) refreshCoordinator() error {
+	if err := c.refreshMetadata(); err != nil {
+		return err
+	}
+	return c.client.RefreshCoordinator(c.groupID)
+}
+
+func (c *Consumer) refreshMetadata() (err error) {
+	if c.client.config.Metadata.Full {
+		err = c.client.RefreshMetadata()
+	} else {
+		var topics []string
+		if topics, err = c.client.Topics(); err == nil && len(topics) != 0 {
+			err = c.client.RefreshMetadata(topics...)
+		}
+	}
+
+	// maybe we didn't have authorization to describe all topics
+	switch err {
+	case sarama.ErrTopicAuthorizationFailed:
+		err = c.client.RefreshMetadata(c.coreTopics...)
+	}
+	return
+}
+
+func (c *Consumer) membership() (memberID string, generationID int32) {
+	c.membershipMu.RLock()
+	memberID, generationID = c.memberID, c.generationID
+	c.membershipMu.RUnlock()
+	return
+}
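
The error and notification plumbing above (handleError, handleNotification) only forwards events when the corresponding channel returns are enabled in the config. A minimal application-side sketch of draining those channels, assuming this package's public Errors() and Notifications() accessors; broker, group, and topic names are placeholders:

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// Enable the channels that handleError and handleNotification feed.
	config := cluster.NewConfig()
	config.Consumer.Return.Errors = true
	config.Group.Return.Notifications = true

	consumer, err := cluster.NewConsumer([]string{"localhost:9092"}, "example-group", []string{"example-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Drain errors and rebalance notifications so the channels never block.
	go func() {
		for err := range consumer.Errors() {
			log.Println("consume error:", err)
		}
	}()
	for ntf := range consumer.Notifications() {
		log.Printf("rebalanced: %+v", ntf)
	}
}
```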
diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go
new file mode 100644
index 0000000..9c8ff16
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/doc.go
@@ -0,0 +1,8 @@
+/*
+Package cluster provides cluster extensions for Sarama, enabling users
+to consume topics across multiple, balanced nodes.
+
+It requires Kafka v0.9+ and follows the consumer rewrite design described in:
+https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design
+*/
+package cluster
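
A minimal consumption sketch against the package's public API (the broker address, group, and topic names below are placeholders, not part of this file):

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()

	// Join consumer group "example-group" for topic "example-topic".
	consumer, err := cluster.NewConsumer([]string{"localhost:9092"}, "example-group", []string{"example-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Messages from all assigned partitions arrive on a single channel.
	for msg := range consumer.Messages() {
		log.Printf("%s/%d@%d: %s", msg.Topic, msg.Partition, msg.Offset, msg.Value)
		consumer.MarkOffset(msg, "") // mark as processed; committed on the commit interval
	}
}
```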
diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go
new file mode 100644
index 0000000..4223ac5
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/offsets.go
@@ -0,0 +1,69 @@
+package cluster
+
+import (
+	"sync"
+
+	"github.com/Shopify/sarama"
+)
+
+// OffsetStash allows accumulating offsets and
+// marking them as processed in bulk
+type OffsetStash struct {
+	offsets map[topicPartition]offsetInfo
+	mu      sync.Mutex
+}
+
+// NewOffsetStash inits a blank stash
+func NewOffsetStash() *OffsetStash {
+	return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)}
+}
+
+// MarkOffset stashes the provided message offset
+func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
+	s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
+}
+
+// MarkPartitionOffset stashes the offset for the provided topic/partition combination
+func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	key := topicPartition{Topic: topic, Partition: partition}
+	if info := s.offsets[key]; offset >= info.Offset {
+		info.Offset = offset
+		info.Metadata = metadata
+		s.offsets[key] = info
+	}
+}
+
+// ResetPartitionOffset stashes the offset for the provided topic/partition combination.
+// Unlike MarkPartitionOffset, ResetPartitionOffset allows rewinding to an earlier offset.
+func (s *OffsetStash) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	key := topicPartition{Topic: topic, Partition: partition}
+	if info := s.offsets[key]; offset <= info.Offset {
+		info.Offset = offset
+		info.Metadata = metadata
+		s.offsets[key] = info
+	}
+}
+
+// ResetOffset stashes the provided message offset
+// See ResetPartitionOffset for explanation
+func (s *OffsetStash) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
+	s.ResetPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
+}
+
+// Offsets returns the latest stashed offsets by topic-partition
+func (s *OffsetStash) Offsets() map[string]int64 {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	res := make(map[string]int64, len(s.offsets))
+	for tp, info := range s.offsets {
+		res[tp.String()] = info.Offset
+	}
+	return res
+}
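
A sketch of batch offset handling with OffsetStash, assuming the Consumer.MarkOffsets(stash) helper defined elsewhere in this vendored package; names are placeholders:

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	consumer, err := cluster.NewConsumer([]string{"localhost:9092"}, "example-group", []string{"example-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	stash := cluster.NewOffsetStash()
	count := 0
	for msg := range consumer.Messages() {
		stash.MarkOffset(msg, "") // accumulate instead of marking one by one

		count++
		if count%1000 == 0 {
			consumer.MarkOffsets(stash) // hand the whole batch to the consumer
		}
	}
}
```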
diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go
new file mode 100644
index 0000000..bfaa587
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/partitions.go
@@ -0,0 +1,290 @@
+package cluster
+
+import (
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/Shopify/sarama"
+)
+
+// PartitionConsumer allows code to consume individual partitions from the cluster.
+//
+// See docs for Consumer.Partitions() for more on how to implement this.
+type PartitionConsumer interface {
+	sarama.PartitionConsumer
+
+	// Topic returns the consumed topic name
+	Topic() string
+
+	// Partition returns the consumed partition
+	Partition() int32
+
+	// InitialOffset returns the offset used for creating the PartitionConsumer instance.
+	// The returned offset can be a literal offset, or OffsetNewest or OffsetOldest.
+	InitialOffset() int64
+
+	// MarkOffset marks the offset of a message as processed.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets the offset to a previously processed message.
+	ResetOffset(offset int64, metadata string)
+}
+
+type partitionConsumer struct {
+	sarama.PartitionConsumer
+
+	state partitionState
+	mu    sync.Mutex
+
+	topic         string
+	partition     int32
+	initialOffset int64
+
+	closeOnce sync.Once
+	closeErr  error
+
+	dying, dead chan none
+}
+
+func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) {
+	offset := info.NextOffset(defaultOffset)
+	pcm, err := manager.ConsumePartition(topic, partition, offset)
+
+	// Resume from default offset, if requested offset is out-of-range
+	if err == sarama.ErrOffsetOutOfRange {
+		info.Offset = -1
+		offset = defaultOffset
+		pcm, err = manager.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &partitionConsumer{
+		PartitionConsumer: pcm,
+		state:             partitionState{Info: info},
+
+		topic:         topic,
+		partition:     partition,
+		initialOffset: offset,
+
+		dying: make(chan none),
+		dead:  make(chan none),
+	}, nil
+}
+
+// Topic implements PartitionConsumer
+func (c *partitionConsumer) Topic() string { return c.topic }
+
+// Partition implements PartitionConsumer
+func (c *partitionConsumer) Partition() int32 { return c.partition }
+
+// InitialOffset implements PartitionConsumer
+func (c *partitionConsumer) InitialOffset() int64 { return c.initialOffset }
+
+// AsyncClose implements PartitionConsumer
+func (c *partitionConsumer) AsyncClose() {
+	c.closeOnce.Do(func() {
+		c.closeErr = c.PartitionConsumer.Close()
+		close(c.dying)
+	})
+}
+
+// Close implements PartitionConsumer
+func (c *partitionConsumer) Close() error {
+	c.AsyncClose()
+	<-c.dead
+	return c.closeErr
+}
+
+func (c *partitionConsumer) waitFor(stopper <-chan none, errors chan<- error) {
+	defer close(c.dead)
+
+	for {
+		select {
+		case err, ok := <-c.Errors():
+			if !ok {
+				return
+			}
+			select {
+			case errors <- err:
+			case <-stopper:
+				return
+			case <-c.dying:
+				return
+			}
+		case <-stopper:
+			return
+		case <-c.dying:
+			return
+		}
+	}
+}
+
+func (c *partitionConsumer) multiplex(stopper <-chan none, messages chan<- *sarama.ConsumerMessage, errors chan<- error) {
+	defer close(c.dead)
+
+	for {
+		select {
+		case msg, ok := <-c.Messages():
+			if !ok {
+				return
+			}
+			select {
+			case messages <- msg:
+			case <-stopper:
+				return
+			case <-c.dying:
+				return
+			}
+		case err, ok := <-c.Errors():
+			if !ok {
+				return
+			}
+			select {
+			case errors <- err:
+			case <-stopper:
+				return
+			case <-c.dying:
+				return
+			}
+		case <-stopper:
+			return
+		case <-c.dying:
+			return
+		}
+	}
+}
+
+func (c *partitionConsumer) getState() partitionState {
+	c.mu.Lock()
+	state := c.state
+	c.mu.Unlock()
+
+	return state
+}
+
+func (c *partitionConsumer) markCommitted(offset int64) {
+	c.mu.Lock()
+	if offset == c.state.Info.Offset {
+		c.state.Dirty = false
+	}
+	c.mu.Unlock()
+}
+
+// MarkOffset implements PartitionConsumer
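+// Note: the state stores offset+1, i.e. the next offset to fetch, matching
+// Kafka's committed-offset convention; ResetOffset below follows the same rule.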
+func (c *partitionConsumer) MarkOffset(offset int64, metadata string) {
+	c.mu.Lock()
+	if next := offset + 1; next > c.state.Info.Offset {
+		c.state.Info.Offset = next
+		c.state.Info.Metadata = metadata
+		c.state.Dirty = true
+	}
+	c.mu.Unlock()
+}
+
+// ResetOffset implements PartitionConsumer
+func (c *partitionConsumer) ResetOffset(offset int64, metadata string) {
+	c.mu.Lock()
+	if next := offset + 1; next <= c.state.Info.Offset {
+		c.state.Info.Offset = next
+		c.state.Info.Metadata = metadata
+		c.state.Dirty = true
+	}
+	c.mu.Unlock()
+}
+
+// --------------------------------------------------------------------
+
+type partitionState struct {
+	Info       offsetInfo
+	Dirty      bool
+	LastCommit time.Time
+}
+
+// --------------------------------------------------------------------
+
+type partitionMap struct {
+	data map[topicPartition]*partitionConsumer
+	mu   sync.RWMutex
+}
+
+func newPartitionMap() *partitionMap {
+	return &partitionMap{
+		data: make(map[topicPartition]*partitionConsumer),
+	}
+}
+
+func (m *partitionMap) IsSubscribedTo(topic string) bool {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	for tp := range m.data {
+		if tp.Topic == topic {
+			return true
+		}
+	}
+	return false
+}
+
+func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer {
+	m.mu.RLock()
+	pc := m.data[topicPartition{topic, partition}]
+	m.mu.RUnlock()
+	return pc
+}
+
+func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) {
+	m.mu.Lock()
+	m.data[topicPartition{topic, partition}] = pc
+	m.mu.Unlock()
+}
+
+func (m *partitionMap) Snapshot() map[topicPartition]partitionState {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	snap := make(map[topicPartition]partitionState, len(m.data))
+	for tp, pc := range m.data {
+		snap[tp] = pc.getState()
+	}
+	return snap
+}
+
+func (m *partitionMap) Stop() {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	var wg sync.WaitGroup
+	for tp := range m.data {
+		wg.Add(1)
+		go func(p *partitionConsumer) {
+			_ = p.Close()
+			wg.Done()
+		}(m.data[tp])
+	}
+	wg.Wait()
+}
+
+func (m *partitionMap) Clear() {
+	m.mu.Lock()
+	for tp := range m.data {
+		delete(m.data, tp)
+	}
+	m.mu.Unlock()
+}
+
+func (m *partitionMap) Info() map[string][]int32 {
+	info := make(map[string][]int32)
+	m.mu.RLock()
+	for tp := range m.data {
+		info[tp.Topic] = append(info[tp.Topic], tp.Partition)
+	}
+	m.mu.RUnlock()
+
+	for topic := range info {
+		sort.Sort(int32Slice(info[topic]))
+	}
+	return info
+}
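
A sketch of the partition-oriented mode implied by the PartitionConsumer interface above, using ConsumerModePartitions and the Consumer.Partitions() channel from this package (names are placeholders):

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	config.Group.Mode = cluster.ConsumerModePartitions

	consumer, err := cluster.NewConsumer([]string{"localhost:9092"}, "example-group", []string{"example-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Each assigned partition arrives as its own PartitionConsumer.
	for pc := range consumer.Partitions() {
		go func(pc cluster.PartitionConsumer) {
			for msg := range pc.Messages() {
				log.Printf("%s/%d@%d", msg.Topic, msg.Partition, msg.Offset)
				consumer.MarkOffset(msg, "")
			}
		}(pc)
	}
}
```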
diff --git a/vendor/github.com/bsm/sarama-cluster/util.go b/vendor/github.com/bsm/sarama-cluster/util.go
new file mode 100644
index 0000000..e7cb5dd
--- /dev/null
+++ b/vendor/github.com/bsm/sarama-cluster/util.go
@@ -0,0 +1,75 @@
+package cluster
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+)
+
+type none struct{}
+
+type topicPartition struct {
+	Topic     string
+	Partition int32
+}
+
+func (tp *topicPartition) String() string {
+	return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition)
+}
+
+type offsetInfo struct {
+	Offset   int64
+	Metadata string
+}
+
+func (i offsetInfo) NextOffset(fallback int64) int64 {
+	if i.Offset > -1 {
+		return i.Offset
+	}
+	return fallback
+}
+
+type int32Slice []int32
+
+func (p int32Slice) Len() int           { return len(p) }
+func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p int32Slice) Diff(o int32Slice) (res []int32) {
+	on := len(o)
+	for _, x := range p {
+		n := sort.Search(on, func(i int) bool { return o[i] >= x })
+		if n < on && o[n] == x {
+			continue
+		}
+		res = append(res, x)
+	}
+	return
+}
+
+// --------------------------------------------------------------------
+
+type loopTomb struct {
+	c chan none
+	o sync.Once
+	w sync.WaitGroup
+}
+
+func newLoopTomb() *loopTomb {
+	return &loopTomb{c: make(chan none)}
+}
+
+func (t *loopTomb) stop()  { t.o.Do(func() { close(t.c) }) }
+func (t *loopTomb) Close() { t.stop(); t.w.Wait() }
+
+func (t *loopTomb) Dying() <-chan none { return t.c }
+func (t *loopTomb) Go(f func(<-chan none)) {
+	t.w.Add(1)
+
+	go func() {
+		defer t.stop()
+		defer t.w.Done()
+
+		f(t.c)
+	}()
+}
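
loopTomb couples the lifetimes of sibling goroutines: the first loop to return (or a Close() call) closes the shared channel, and every other loop observes it and exits. A minimal in-package sketch, since loopTomb and none are unexported (exampleLoops is a hypothetical helper, not part of the file):

```go
package cluster

import "time"

// exampleLoops (hypothetical) shows the loopTomb pattern used by mainLoop:
// if the loop started via Go returns, the shared channel closes, and any
// sibling loops sharing the tomb would observe it and return as well.
func exampleLoops() {
	t := newLoopTomb()
	t.Go(func(stop <-chan none) {
		for {
			select {
			case <-stop: // a sibling exited, or Close() was called
				return
			case <-time.After(time.Second):
				// periodic work goes here
			}
		}
	})
	t.Close() // stop the loop and wait for it to return
}
```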
diff --git a/vendor/github.com/cevaris/ordered_map/.gitignore b/vendor/github.com/cevaris/ordered_map/.gitignore
new file mode 100644
index 0000000..4af4e59
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/.gitignore
@@ -0,0 +1,5 @@
+*.test
+*~
+
+.idea
+*.iml
diff --git a/vendor/github.com/cevaris/ordered_map/.travis.yml b/vendor/github.com/cevaris/ordered_map/.travis.yml
new file mode 100644
index 0000000..6d74ee9
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/.travis.yml
@@ -0,0 +1,16 @@
+---
+language: go
+
+go:
+  - tip
+  - 1.9
+  - 1.8
+  - 1.7
+  - 1.6
+  - 1.5
+  - 1.4
+  - 1.3
+
+install:
+  - make
+  - make test
diff --git a/vendor/github.com/cevaris/ordered_map/LICENSE.md b/vendor/github.com/cevaris/ordered_map/LICENSE.md
new file mode 100644
index 0000000..4cb9b14
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2016 Adam Cardenas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cevaris/ordered_map/Makefile b/vendor/github.com/cevaris/ordered_map/Makefile
new file mode 100644
index 0000000..099e53b
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/Makefile
@@ -0,0 +1,10 @@
+all: build install 
+
+build:
+	go build
+
+install:
+	go install
+
+test:
+	go test -v *.go
diff --git a/vendor/github.com/cevaris/ordered_map/README.md b/vendor/github.com/cevaris/ordered_map/README.md
new file mode 100644
index 0000000..37b4503
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/README.md
@@ -0,0 +1,113 @@
+# Ordered Map for golang
+
+[![Build Status](https://travis-ci.org/cevaris/ordered_map.svg?branch=master)](https://travis-ci.org/cevaris/ordered_map)
+
+**OrderedMap** is a Go implementation of Python's OrderedDict. Go's builtin `map` purposefully randomizes the iteration of stored key/values, whereas the **OrderedMap** struct preserves the order in which key/value pairs were inserted, so iteration yields them in first-in, first-out order.
+
+
+## Features
+- Full Key/Value support for all data types
+- Exposes an Iterator that iterates in order of insertion
+- Full Get/Set/Delete map interface
+- Supports Golang v1.3 through v1.9
+
+## Download and Install
+
+`go get github.com/cevaris/ordered_map`
+
+
+## Examples
+
+### Create, Get, Set, Delete
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/cevaris/ordered_map"
+)
+
+func main() {
+
+    // Init new OrderedMap
+    om := ordered_map.NewOrderedMap()
+
+    // Set key
+    om.Set("a", 1)
+    om.Set("b", 2)
+    om.Set("c", 3)
+    om.Set("d", 4)
+
+    // Same interface as builtin map
+    if val, ok := om.Get("b"); ok {
+        // Found key "b"
+        fmt.Println(val)
+    }
+
+    // Delete a key
+    om.Delete("c")
+
+    // Failed Get lookup because we deleted "c"
+    if _, ok := om.Get("c"); !ok {
+        // Did not find key "c"
+        fmt.Println("c not found")
+    }
+    
+    fmt.Println(om)
+}
+```
+
+
+### Iterator
+
+```go
+n := 100
+om := ordered_map.NewOrderedMap()
+
+for i := 0; i < n; i++ {
+    // Insert data into OrderedMap
+    om.Set(i, fmt.Sprintf("%d", i*i))
+}
+
+// Iterate through values
+// - Values are yielded in insertion order
+// - Each is returned as a key/value pair struct
+iter := om.IterFunc()
+for kv, ok := iter(); ok; kv, ok = iter() {
+    fmt.Println(kv, kv.Key, kv.Value)
+}
+```
+
+### Custom Structs
+
+```go
+// MyStruct is assumed to be defined elsewhere, e.g.:
+// type MyStruct struct{ A int; B float64 }
+om := ordered_map.NewOrderedMap()
+om.Set("one", &MyStruct{1, 1.1})
+om.Set("two", &MyStruct{2, 2.2})
+om.Set("three", &MyStruct{3, 3.3})
+
+fmt.Println(om)
+// Output: OrderedMap[one:&{1 1.1},  two:&{2 2.2},  three:&{3 3.3}, ]
+```
+
+## For Development
+
+Git clone project 
+
+`git clone https://github.com/cevaris/ordered_map.git`  
+  
+Build and install project
+
+`make`
+
+Run tests 
+
+`make test`
+
diff --git a/vendor/github.com/cevaris/ordered_map/key_pair.go b/vendor/github.com/cevaris/ordered_map/key_pair.go
new file mode 100644
index 0000000..88afbcf
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/key_pair.go
@@ -0,0 +1,16 @@
+package ordered_map
+
+import "fmt"
+
+type KVPair struct {
+	Key   interface{}
+	Value interface{}
+}
+
+func (k *KVPair) String() string {
+	return fmt.Sprintf("%v:%v", k.Key, k.Value)
+}
+
+func (kv1 *KVPair) Compare(kv2 *KVPair) bool {
+	return kv1.Key == kv2.Key && kv1.Value == kv2.Value
+}
\ No newline at end of file
diff --git a/vendor/github.com/cevaris/ordered_map/node.go b/vendor/github.com/cevaris/ordered_map/node.go
new file mode 100644
index 0000000..ad0d142
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/node.go
@@ -0,0 +1,62 @@
+package ordered_map
+
+import (
+	"bytes"
+	"fmt"
+)
+
+type node struct {
+	Prev  *node
+	Next  *node
+	Value interface{}
+}
+
+func newRootNode() *node {
+	root := &node{}
+	root.Prev = root
+	root.Next = root
+	return root
+}
+
+func newNode(prev *node, next *node, key interface{}) *node {
+	return &node{Prev: prev, Next: next, Value: key}
+}
+
+func (n *node) Add(value string) {
+	root := n
+	last := root.Prev
+	last.Next = newNode(last, n, value)
+	root.Prev = last.Next
+}
+
+func (n *node) String() string {
+	var buffer bytes.Buffer
+	if n.Value == "" {
+		// n is the sentinel root: walk the list and print each value
+		var curr *node
+		root := n
+		curr = root.Next
+		for curr != root {
+			buffer.WriteString(fmt.Sprintf("%s, ", curr.Value))
+			curr = curr.Next
+		}
+	} else {
+		// Else, print pointer value
+		buffer.WriteString(fmt.Sprintf("%p, ", n))
+	}
+	return fmt.Sprintf("LinkList[%v]", buffer.String())
+}
+
+func (n *node) IterFunc() func() (string, bool) {
+	var curr *node
+	root := n
+	curr = root.Next
+	return func() (string, bool) {
+		for curr != root {
+			tmp := curr.Value.(string)
+			curr = curr.Next
+			return tmp, true
+		}
+		return "", false
+	}
+}
diff --git a/vendor/github.com/cevaris/ordered_map/ordered_map.go b/vendor/github.com/cevaris/ordered_map/ordered_map.go
new file mode 100644
index 0000000..4116cd8
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/ordered_map.go
@@ -0,0 +1,121 @@
+package ordered_map
+
+import (
+	"fmt"
+)
+
+type OrderedMap struct {
+	store  map[interface{}]interface{}
+	mapper map[interface{}]*node
+	root   *node
+}
+
+func NewOrderedMap() *OrderedMap {
+	om := &OrderedMap{
+		store:  make(map[interface{}]interface{}),
+		mapper: make(map[interface{}]*node),
+		root:   newRootNode(),
+	}
+	return om
+}
+
+func NewOrderedMapWithArgs(args []*KVPair) *OrderedMap {
+	om := NewOrderedMap()
+	om.update(args)
+	return om
+}
+
+func (om *OrderedMap) update(args []*KVPair) {
+	for _, pair := range args {
+		om.Set(pair.Key, pair.Value)
+	}
+}
+
+func (om *OrderedMap) Set(key interface{}, value interface{}) {
+	if _, ok := om.store[key]; !ok {
+		root := om.root
+		last := root.Prev
+		last.Next = newNode(last, root, key)
+		root.Prev = last.Next
+		om.mapper[key] = last.Next
+	}
+	om.store[key] = value
+}
+
+func (om *OrderedMap) Get(key interface{}) (interface{}, bool) {
+	val, ok := om.store[key]
+	return val, ok
+}
+
+func (om *OrderedMap) Delete(key interface{}) {
+	_, ok := om.store[key]
+	if ok {
+		delete(om.store, key)
+	}
+	root, rootFound := om.mapper[key]
+	if rootFound {
+		prev := root.Prev
+		next := root.Next
+		prev.Next = next
+		next.Prev = prev
+		delete(om.mapper, key)
+	}
+}
+
+func (om *OrderedMap) String() string {
+	builder := make([]string, len(om.store))
+
+	index := 0
+	iter := om.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		val, _ := om.Get(kv.Key)
+		builder[index] = fmt.Sprintf("%v:%v, ", kv.Key, val)
+		index++
+	}
+	return fmt.Sprintf("OrderedMap%v", builder)
+}
+
+func (om *OrderedMap) Iter() <-chan *KVPair {
+	println("Iter() is deprecated; use IterFunc() instead.")
+	return om.UnsafeIter()
+}
+
+/*
+Beware: UnsafeIter leaks its goroutine if the map is not fully traversed.
+For most cases, IterFunc() is the safer iterator.
+*/
+func (om *OrderedMap) UnsafeIter() <-chan *KVPair {
+	keys := make(chan *KVPair)
+	go func() {
+		defer close(keys)
+		var curr *node
+		root := om.root
+		curr = root.Next
+		for curr != root {
+			v := om.store[curr.Value]
+			keys <- &KVPair{curr.Value, v}
+			curr = curr.Next
+		}
+	}()
+	return keys
+}
+
+func (om *OrderedMap) IterFunc() func() (*KVPair, bool) {
+	var curr *node
+	root := om.root
+	curr = root.Next
+	return func() (*KVPair, bool) {
+		for curr != root {
+			tmp := curr
+			curr = curr.Next
+			v := om.store[tmp.Value]
+			return &KVPair{tmp.Value, v}, true
+		}
+		return nil, false
+	}
+}
+
+func (om *OrderedMap) Len() int {
+	return len(om.store)
+}
+
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE b/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/.gitignore b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/.gitignore
new file mode 100644
index 0000000..b1a2111
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/.gitignore
@@ -0,0 +1,2 @@
+testconf.json
+go_rdkafka_generr/go_rdkafka_generr
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go
new file mode 100644
index 0000000..188dc03
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go
@@ -0,0 +1,58 @@
+package kafka
+
+/**
+ * Copyright 2016-2018 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+)
+
+/*
+#include <librdkafka/rdkafka.h>
+
+//Minimum required librdkafka version. This is checked both during
+//build-time and runtime.
+//Make sure to keep the MIN_RD_KAFKA_VERSION, MIN_VER_ERRSTR and #error
+//defines and strings in sync.
+//
+
+#define MIN_RD_KAFKA_VERSION 0x0000b0500
+
+#ifdef __APPLE__
+#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v0.11.5 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
+#else
+#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v0.11.5 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
+#endif
+
+#if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION
+#ifdef __APPLE__
+#error "confluent-kafka-go requires librdkafka v0.11.5 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`"
+#else
+#error "confluent-kafka-go requires librdkafka v0.11.5 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html"
+#endif
+#endif
+*/
+import "C"
+
+func versionCheck() error {
+	ver, verstr := LibraryVersion()
+	if ver < C.MIN_RD_KAFKA_VERSION {
+		return newErrorFromString(ErrNotImplemented,
+			fmt.Sprintf("%s: librdkafka version %s (0x%x) detected",
+				C.MIN_VER_ERRSTR, verstr, ver))
+	}
+	return nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md
new file mode 100644
index 0000000..6df4d54
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md
@@ -0,0 +1,69 @@
+# Information for confluent-kafka-go developers
+
+Whenever librdkafka error codes are updated make sure to run generate before building:
+
+```
+  $ (cd go_rdkafka_generr && go install) && go generate
+  $ go build
+```
+
+
+
+
+## Testing
+
+Some of the tests included in this directory, the benchmark and integration tests in particular,
+require an existing Kafka cluster and a testconf.json configuration file to
+provide tests with bootstrap brokers, topic name, etc.
+
+The format of testconf.json is a JSON object:
+```
+{
+  "Brokers": "<bootstrap-brokers>",
+  "Topic": "<test-topic-name>"
+}
+```
+
+See testconf-example.json for an example and full set of available options.
+
+
+To run unit-tests:
+```
+$ go test
+```
+
+To run benchmark tests:
+```
+$ go test -bench .
+```
+
+For the code coverage:
+```
+$ go test -coverprofile=coverage.out -bench=.
+$ go tool cover -func=coverage.out
+```
+
+## Build tags (static linking)
+
+
+Different build types are supported through Go build tags (`-tags ..`);
+these tags should be specified on the **application** build command
+(see the example after this list).
+
+ * `static` - Build with librdkafka linked statically (but librdkafka
+              dependencies linked dynamically).
+ * `static_all` - Build with all libraries linked statically.
+ * neither - Build with librdkafka (and its dependencies) linked dynamically.
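+
+For example, to build an application with librdkafka linked statically (a
+sketch; substitute your application's package path):
+
+```
+$ go build -tags static ./...
+```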
+
+
+
+## Generating HTML documentation
+
+To generate one-page HTML documentation run the mk/doc-gen.py script from the
+top-level directory. This script requires the beautifulsoup4 Python package.
+
+```
+$ source .../your/virtualenv/bin/activate
+$ pip install beautifulsoup4
+...
+$ mk/doc-gen.py > kafka.html
+```
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go
new file mode 100644
index 0000000..c2ba76c
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go
@@ -0,0 +1,942 @@
+/**
+ * Copyright 2018 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+	"unsafe"
+)
+
+/*
+#include <librdkafka/rdkafka.h>
+#include <stdlib.h>
+
+static const rd_kafka_topic_result_t *
+topic_result_by_idx (const rd_kafka_topic_result_t **topics, size_t cnt, size_t idx) {
+    if (idx >= cnt)
+      return NULL;
+    return topics[idx];
+}
+
+static const rd_kafka_ConfigResource_t *
+ConfigResource_by_idx (const rd_kafka_ConfigResource_t **res, size_t cnt, size_t idx) {
+    if (idx >= cnt)
+      return NULL;
+    return res[idx];
+}
+
+static const rd_kafka_ConfigEntry_t *
+ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t idx) {
+    if (idx >= cnt)
+      return NULL;
+    return entries[idx];
+}
+*/
+import "C"
+
+// AdminClient is derived from an existing Producer or Consumer
+type AdminClient struct {
+	handle    *handle
+	isDerived bool // Derived from existing client handle
+}
+
+func durationToMilliseconds(t time.Duration) int {
+	if t > 0 {
+		return (int)(t.Seconds() * 1000.0)
+	}
+	return (int)(t)
+}
+
+// TopicResult provides per-topic operation result (error) information.
+type TopicResult struct {
+	// Topic name
+	Topic string
+	// Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+	Error Error
+}
+
+// String returns a human-readable representation of a TopicResult.
+func (t TopicResult) String() string {
+	if t.Error.code == 0 {
+		return t.Topic
+	}
+	return fmt.Sprintf("%s (%s)", t.Topic, t.Error.str)
+}
+
+// TopicSpecification holds parameters for creating a new topic.
+// TopicSpecification is analogous to NewTopic in the Java Topic Admin API.
+type TopicSpecification struct {
+	// Topic name to create.
+	Topic string
+	// Number of partitions in topic.
+	NumPartitions int
+	// Default replication factor for the topic's partitions, or zero
+	// if an explicit ReplicaAssignment is set.
+	ReplicationFactor int
+	// (Optional) Explicit replica assignment. The outer array is
+	// indexed by the partition number, while the inner per-partition array
+	// contains the replica broker ids. The first broker in each
+	// broker id list will be the preferred replica.
+	ReplicaAssignment [][]int32
+	// Topic configuration.
+	Config map[string]string
+}
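+
+// Example (sketch, not part of the original file): creating a topic with
+// AdminClient.CreateTopics; the admin client `a`, ctx and topic name are
+// illustrative.
+//
+//	results, err := a.CreateTopics(ctx, []TopicSpecification{{
+//		Topic:             "example-topic",
+//		NumPartitions:     3,
+//		ReplicationFactor: 1,
+//	}})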
+
+// PartitionsSpecification holds parameters for creating additional partitions for a topic.
+// PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API.
+type PartitionsSpecification struct {
+	// Topic to create more partitions for.
+	Topic string
+	// New partition count for topic, must be higher than current partition count.
+	IncreaseTo int
+	// (Optional) Explicit replica assignment. The outer array is
+	// indexed by the new partition index (i.e., 0 for the first added
+	// partition), while the inner per-partition array
+	// contains the replica broker ids. The first broker in each
+	// broker id list will be the preferred replica.
+	ReplicaAssignment [][]int32
+}
+
+// ResourceType represents an Apache Kafka resource type
+type ResourceType int
+
+const (
+	// ResourceUnknown - Unknown
+	ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN)
+	// ResourceAny - match any resource type (DescribeConfigs)
+	ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY)
+	// ResourceTopic - Topic
+	ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC)
+	// ResourceGroup - Group
+	ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP)
+	// ResourceBroker - Broker
+	ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER)
+)
+
+// String returns the human-readable representation of a ResourceType
+func (t ResourceType) String() string {
+	return C.GoString(C.rd_kafka_ResourceType_name(C.rd_kafka_ResourceType_t(t)))
+}
+
+// ResourceTypeFromString translates a resource type name/string to
+// a ResourceType value.
+func ResourceTypeFromString(typeString string) (ResourceType, error) {
+	switch strings.ToUpper(typeString) {
+	case "ANY":
+		return ResourceAny, nil
+	case "TOPIC":
+		return ResourceTopic, nil
+	case "GROUP":
+		return ResourceGroup, nil
+	case "BROKER":
+		return ResourceBroker, nil
+	default:
+		return ResourceUnknown, newGoError(ErrInvalidArg)
+	}
+}
+
+// ConfigSource represents an Apache Kafka config source
+type ConfigSource int
+
+const (
+	// ConfigSourceUnknown is the default value
+	ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG)
+	// ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic
+	ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG)
+	// ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker
+	ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG)
+	// ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster
+	ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG)
+	// ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file)
+	ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG)
+	// ConfigSourceDefault is built-in default configuration for configs that have a default value
+	ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
+)
+
+// String returns the human-readable representation of a ConfigSource type
+func (t ConfigSource) String() string {
+	return C.GoString(C.rd_kafka_ConfigSource_name(C.rd_kafka_ConfigSource_t(t)))
+}
+
+// ConfigResource holds parameters for altering an Apache Kafka configuration resource
+type ConfigResource struct {
+	// Type of resource to set.
+	Type ResourceType
+	// Name of resource to set.
+	Name string
+	// Config entries to set.
+	// Configuration updates are atomic; any configuration property not provided
+	// here will be reverted (by the broker) to its default value.
+	// Use DescribeConfigs to retrieve the list of current configuration entry values.
+	Config []ConfigEntry
+}
+
+// String returns a human-readable representation of a ConfigResource
+func (c ConfigResource) String() string {
+	return fmt.Sprintf("Resource(%s, %s)", c.Type, c.Name)
+}
+
+// AlterOperation specifies the operation to perform on the ConfigEntry.
+// Currently only AlterOperationSet is supported.
+type AlterOperation int
+
+const (
+	// AlterOperationSet sets/overwrites the configuration setting.
+	AlterOperationSet = iota
+)
+
+// String returns the human-readable representation of an AlterOperation
+func (o AlterOperation) String() string {
+	switch o {
+	case AlterOperationSet:
+		return "Set"
+	default:
+		return fmt.Sprintf("Unknown%d?", int(o))
+	}
+}
+
+// ConfigEntry holds parameters for altering a resource's configuration.
+type ConfigEntry struct {
+	// Name of configuration entry, e.g., topic configuration property name.
+	Name string
+	// Value of configuration entry.
+	Value string
+	// Operation to perform on the entry.
+	Operation AlterOperation
+}
+
+// StringMapToConfigEntries creates a new map of ConfigEntry objects from the
+// provided string map. The AlterOperation is set on each created entry.
+func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry {
+	var ceList []ConfigEntry
+
+	for k, v := range stringMap {
+		ceList = append(ceList, ConfigEntry{Name: k, Value: v, Operation: operation})
+	}
+
+	return ceList
+}
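+
+// Example (sketch): building the Config list for a ConfigResource from a
+// plain string map; the topic name is illustrative.
+//
+//	res := ConfigResource{
+//		Type:   ResourceTopic,
+//		Name:   "example-topic",
+//		Config: StringMapToConfigEntries(map[string]string{"retention.ms": "3600000"}, AlterOperationSet),
+//	}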
+
+// String returns a human-readable representation of a ConfigEntry.
+func (c ConfigEntry) String() string {
+	return fmt.Sprintf("%v %s=\"%s\"", c.Operation, c.Name, c.Value)
+}
+
+// ConfigEntryResult contains the result of a single configuration entry from a
+// DescribeConfigs request.
+type ConfigEntryResult struct {
+	// Name of configuration entry, e.g., topic configuration property name.
+	Name string
+	// Value of configuration entry.
+	Value string
+	// Source indicates the configuration source.
+	Source ConfigSource
+	// IsReadOnly indicates whether the configuration entry can be altered.
+	IsReadOnly bool
+	// IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset.
+	IsSensitive bool
+	// IsSynonym indicates whether the configuration entry is a synonym for another configuration property.
+	IsSynonym bool
+	// Synonyms contains a map of configuration entries that are synonyms to this configuration entry.
+	Synonyms map[string]ConfigEntryResult
+}
+
+// String returns a human-readable representation of a ConfigEntryResult.
+func (c ConfigEntryResult) String() string {
+	return fmt.Sprintf("%s=\"%s\"", c.Name, c.Value)
+}
+
+// setFromC sets up a ConfigEntryResult from a C ConfigEntry
+func configEntryResultFromC(cEntry *C.rd_kafka_ConfigEntry_t) (entry ConfigEntryResult) {
+	entry.Name = C.GoString(C.rd_kafka_ConfigEntry_name(cEntry))
+	cValue := C.rd_kafka_ConfigEntry_value(cEntry)
+	if cValue != nil {
+		entry.Value = C.GoString(cValue)
+	}
+	entry.Source = ConfigSource(C.rd_kafka_ConfigEntry_source(cEntry))
+	entry.IsReadOnly = cint2bool(C.rd_kafka_ConfigEntry_is_read_only(cEntry))
+	entry.IsSensitive = cint2bool(C.rd_kafka_ConfigEntry_is_sensitive(cEntry))
+	entry.IsSynonym = cint2bool(C.rd_kafka_ConfigEntry_is_synonym(cEntry))
+
+	var cSynCnt C.size_t
+	cSyns := C.rd_kafka_ConfigEntry_synonyms(cEntry, &cSynCnt)
+	if cSynCnt > 0 {
+		entry.Synonyms = make(map[string]ConfigEntryResult)
+	}
+
+	for si := 0; si < int(cSynCnt); si++ {
+		cSyn := C.ConfigEntry_by_idx(cSyns, cSynCnt, C.size_t(si))
+		Syn := configEntryResultFromC(cSyn)
+		entry.Synonyms[Syn.Name] = Syn
+	}
+
+	return entry
+}
+
+// ConfigResourceResult provides the result for a resource from a AlterConfigs or
+// DescribeConfigs request.
+type ConfigResourceResult struct {
+	// Type of returned result resource.
+	Type ResourceType
+	// Name of returned result resource.
+	Name string
+	// Error, if any, of returned result resource.
+	Error Error
+	// Config entries, if any, of returned result resource.
+	Config map[string]ConfigEntryResult
+}
+
+// String returns a human-readable representation of a ConfigResourceResult.
+func (c ConfigResourceResult) String() string {
+	if c.Error.Code() != 0 {
+		return fmt.Sprintf("ResourceResult(%s, %s, \"%v\")", c.Type, c.Name, c.Error)
+
+	}
+	return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config))
+}
+
+// waitResult waits for a result event on cQueue or for ctx to be cancelled,
+// whichever happens first.
+// The returned result event is checked for errors; its error is returned if set.
+func (a *AdminClient) waitResult(ctx context.Context, cQueue *C.rd_kafka_queue_t, cEventType C.rd_kafka_event_type_t) (rkev *C.rd_kafka_event_t, err error) {
+
+	resultChan := make(chan *C.rd_kafka_event_t)
+	closeChan := make(chan bool) // never written to, just closed
+
+	go func() {
+		for {
+			select {
+			case _, ok := <-closeChan:
+				if !ok {
+					// Context cancelled/timed out
+					close(resultChan)
+					return
+				}
+
+			default:
+				// Wait for result event for at most 50ms
+				// to avoid blocking for too long if
+				// context is cancelled.
+				rkev := C.rd_kafka_queue_poll(cQueue, 50)
+				if rkev != nil {
+					resultChan <- rkev
+					close(resultChan)
+					return
+				}
+			}
+		}
+	}()
+
+	select {
+	case rkev = <-resultChan:
+		// Result type check
+		if cEventType != C.rd_kafka_event_type(rkev) {
+			err = newErrorFromString(ErrInvalidType,
+				fmt.Sprintf("Expected %d result event, not %d", (int)(cEventType), (int)(C.rd_kafka_event_type(rkev))))
+			C.rd_kafka_event_destroy(rkev)
+			return nil, err
+		}
+
+		// Generic error handling
+		cErr := C.rd_kafka_event_error(rkev)
+		if cErr != 0 {
+			err = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
+			C.rd_kafka_event_destroy(rkev)
+			return nil, err
+		}
+		close(closeChan)
+		return rkev, nil
+	case <-ctx.Done():
+		// signal close to go-routine
+		close(closeChan)
+		// wait for close from go-routine to make sure it is done
+		// using cQueue before we return.
+		rkev, ok := <-resultChan
+		if ok {
+			// throw away result since context was cancelled
+			C.rd_kafka_event_destroy(rkev)
+		}
+		return nil, ctx.Err()
+	}
+}
+
+// cToTopicResults converts a C topic_result_t array to a Go TopicResult list.
+func (a *AdminClient) cToTopicResults(cTopicRes **C.rd_kafka_topic_result_t, cCnt C.size_t) (result []TopicResult, err error) {
+
+	result = make([]TopicResult, int(cCnt))
+
+	for i := 0; i < int(cCnt); i++ {
+		cTopic := C.topic_result_by_idx(cTopicRes, cCnt, C.size_t(i))
+		result[i].Topic = C.GoString(C.rd_kafka_topic_result_name(cTopic))
+		result[i].Error = newErrorFromCString(
+			C.rd_kafka_topic_result_error(cTopic),
+			C.rd_kafka_topic_result_error_string(cTopic))
+	}
+
+	return result, nil
+}
+
+// cConfigResourceToResult converts a C ConfigResource result array to a Go ConfigResourceResult list.
+func (a *AdminClient) cConfigResourceToResult(cRes **C.rd_kafka_ConfigResource_t, cCnt C.size_t) (result []ConfigResourceResult, err error) {
+
+	result = make([]ConfigResourceResult, int(cCnt))
+
+	for i := 0; i < int(cCnt); i++ {
+		cRes := C.ConfigResource_by_idx(cRes, cCnt, C.size_t(i))
+		result[i].Type = ResourceType(C.rd_kafka_ConfigResource_type(cRes))
+		result[i].Name = C.GoString(C.rd_kafka_ConfigResource_name(cRes))
+		result[i].Error = newErrorFromCString(
+			C.rd_kafka_ConfigResource_error(cRes),
+			C.rd_kafka_ConfigResource_error_string(cRes))
+		var cConfigCnt C.size_t
+		cConfigs := C.rd_kafka_ConfigResource_configs(cRes, &cConfigCnt)
+		if cConfigCnt > 0 {
+			result[i].Config = make(map[string]ConfigEntryResult)
+		}
+		for ci := 0; ci < int(cConfigCnt); ci++ {
+			cEntry := C.ConfigEntry_by_idx(cConfigs, cConfigCnt, C.size_t(ci))
+			entry := configEntryResultFromC(cEntry)
+			result[i].Config[entry.Name] = entry
+		}
+	}
+
+	return result, nil
+}
+
+// CreateTopics creates topics in cluster.
+//
+// The list of TopicSpecification objects defines the per-topic partition count, replicas, etc.
+//
+// Topic creation is non-atomic and may succeed for some topics but fail for others;
+// make sure to check the result for topic-specific errors.
+//
+// Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API.
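+//
+// A minimal usage sketch, assuming an AdminClient a and a context ctx
+// (the topic name and settings are illustrative):
+//
+//   results, err := a.CreateTopics(ctx, []TopicSpecification{{
+//       Topic:             "mytopic",
+//       NumPartitions:     3,
+//       ReplicationFactor: 1,
+//   }})
+//   for _, res := range results {
+//       fmt.Printf("%s: %s\n", res.Topic, res.Error)
+//   }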
+func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error) {
+	cTopics := make([]*C.rd_kafka_NewTopic_t, len(topics))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go TopicSpecifications to C TopicSpecifications
+	for i, topic := range topics {
+
+		var cReplicationFactor C.int
+		if topic.ReplicationFactor == 0 {
+			cReplicationFactor = -1
+		} else {
+			cReplicationFactor = C.int(topic.ReplicationFactor)
+		}
+		if topic.ReplicaAssignment != nil {
+			if cReplicationFactor != -1 {
+				return nil, newErrorFromString(ErrInvalidArg,
+					"TopicSpecification.ReplicationFactor and TopicSpecification.ReplicaAssignment are mutually exclusive")
+			}
+
+			if len(topic.ReplicaAssignment) != topic.NumPartitions {
+				return nil, newErrorFromString(ErrInvalidArg,
+					"TopicSpecification.ReplicaAssignment must contain exactly TopicSpecification.NumPartitions partitions")
+			}
+
+		} else if cReplicationFactor == -1 {
+			return nil, newErrorFromString(ErrInvalidArg,
+				"TopicSpecification.ReplicationFactor or TopicSpecification.ReplicaAssignment must be specified")
+		}
+
+		cTopics[i] = C.rd_kafka_NewTopic_new(
+			C.CString(topic.Topic),
+			C.int(topic.NumPartitions),
+			cReplicationFactor,
+			cErrstr, cErrstrSize)
+		if cTopics[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Topic %s: %s", topic.Topic, C.GoString(cErrstr)))
+		}
+
+		defer C.rd_kafka_NewTopic_destroy(cTopics[i])
+
+		for p, replicas := range topic.ReplicaAssignment {
+			cReplicas := make([]C.int32_t, len(replicas))
+			for ri, replica := range replicas {
+				cReplicas[ri] = C.int32_t(replica)
+			}
+			cErr := C.rd_kafka_NewTopic_set_replica_assignment(
+				cTopics[i], C.int32_t(p),
+				(*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)),
+				cErrstr, cErrstrSize)
+			if cErr != 0 {
+				return nil, newCErrorFromString(cErr,
+					fmt.Sprintf("Failed to set replica assignment for topic %s partition %d: %s", topic.Topic, p, C.GoString(cErrstr)))
+			}
+		}
+
+		for key, value := range topic.Config {
+			cErr := C.rd_kafka_NewTopic_set_config(
+				cTopics[i],
+				C.CString(key), C.CString(value))
+			if cErr != 0 {
+				return nil, newCErrorFromString(cErr,
+					fmt.Sprintf("Failed to set config %s=%s for topic %s", key, value, topic.Topic))
+			}
+		}
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATETOPICS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_CreateTopics(
+		a.handle.rk,
+		(**C.rd_kafka_NewTopic_t)(&cTopics[0]),
+		C.size_t(len(cTopics)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATETOPICS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_CreateTopics_result(rkev)
+
+	// Convert result from C to Go
+	var cCnt C.size_t
+	cTopicRes := C.rd_kafka_CreateTopics_result_topics(cRes, &cCnt)
+
+	return a.cToTopicResults(cTopicRes, cCnt)
+}
+
+// DeleteTopics deletes a batch of topics.
+//
+// This operation is not transactional and may succeed for a subset of topics
+// while failing for others.
+// It may take several seconds after the DeleteTopics result returns success for
+// all the brokers to become aware that the topics are gone. During this time,
+// topic metadata and configuration may continue to return information about deleted topics.
+//
+// Requires broker version >= 0.10.1.0
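+//
+// A minimal usage sketch, assuming an AdminClient a and a context ctx
+// (the topic name is illustrative):
+//
+//   results, err := a.DeleteTopics(ctx, []string{"mytopic"})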
+func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error) {
+	cTopics := make([]*C.rd_kafka_DeleteTopic_t, len(topics))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go DeleteTopics to C DeleteTopics
+	for i, topic := range topics {
+		cTopics[i] = C.rd_kafka_DeleteTopic_new(C.CString(topic))
+		if cTopics[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Invalid arguments for topic %s", topic))
+		}
+
+		defer C.rd_kafka_DeleteTopic_destroy(cTopics[i])
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETETOPICS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_DeleteTopics(
+		a.handle.rk,
+		(**C.rd_kafka_DeleteTopic_t)(&cTopics[0]),
+		C.size_t(len(cTopics)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETETOPICS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_DeleteTopics_result(rkev)
+
+	// Convert result from C to Go
+	var cCnt C.size_t
+	cTopicRes := C.rd_kafka_DeleteTopics_result_topics(cRes, &cCnt)
+
+	return a.cToTopicResults(cTopicRes, cCnt)
+}
+
+// CreatePartitions creates additional partitions for topics.
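+//
+// A minimal usage sketch, assuming an AdminClient a and a context ctx; it
+// grows the illustrative topic "mytopic" to a total of 6 partitions:
+//
+//   results, err := a.CreatePartitions(ctx, []PartitionsSpecification{{
+//       Topic:      "mytopic",
+//       IncreaseTo: 6,
+//   }})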
+func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) {
+	cParts := make([]*C.rd_kafka_NewPartitions_t, len(partitions))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go PartitionsSpecification to C NewPartitions
+	for i, part := range partitions {
+		cParts[i] = C.rd_kafka_NewPartitions_new(C.CString(part.Topic), C.size_t(part.IncreaseTo), cErrstr, cErrstrSize)
+		if cParts[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Topic %s: %s", part.Topic, C.GoString(cErrstr)))
+		}
+
+		defer C.rd_kafka_NewPartitions_destroy(cParts[i])
+
+		for pidx, replicas := range part.ReplicaAssignment {
+			cReplicas := make([]C.int32_t, len(replicas))
+			for ri, replica := range replicas {
+				cReplicas[ri] = C.int32_t(replica)
+			}
+			cErr := C.rd_kafka_NewPartitions_set_replica_assignment(
+				cParts[i], C.int32_t(pidx),
+				(*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)),
+				cErrstr, cErrstrSize)
+			if cErr != 0 {
+				return nil, newCErrorFromString(cErr,
+					fmt.Sprintf("Failed to set replica assignment for topic %s new partition index %d: %s", part.Topic, pidx, C.GoString(cErrstr)))
+			}
+		}
+
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_CreatePartitions(
+		a.handle.rk,
+		(**C.rd_kafka_NewPartitions_t)(&cParts[0]),
+		C.size_t(len(cParts)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_CreatePartitions_result(rkev)
+
+	// Convert result from C to Go
+	var cCnt C.size_t
+	cTopicRes := C.rd_kafka_CreatePartitions_result_topics(cRes, &cCnt)
+
+	return a.cToTopicResults(cTopicRes, cCnt)
+}
+
+// AlterConfigs alters/updates cluster resource configuration.
+//
+// Updates are not transactional, so they may succeed for a subset
+// of the provided resources while others fail.
+// The configuration for a particular resource is updated atomically:
+// AlterConfigs replaces all existing configuration for the resource
+// with the provided ConfigEntries, reverting any unspecified entries
+// to their default values.
+//
+// Requires broker version >= 0.11.0.0.
+//
+// Multiple resources and resource types may be set, but at most one
+// resource of type ResourceBroker is allowed per call since these
+// resource requests must be sent to the broker specified in the resource.
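+//
+// A minimal usage sketch, assuming an AdminClient a, a context ctx and the
+// package's ResourceTopic resource type (the resource name and config value
+// are illustrative):
+//
+//   results, err := a.AlterConfigs(ctx, []ConfigResource{{
+//       Type: ResourceTopic,
+//       Name: "mytopic",
+//       Config: StringMapToConfigEntries(
+//           map[string]string{"retention.ms": "3600000"},
+//           AlterOperationSet),
+//   }})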
+func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) {
+	cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go ConfigResources to C ConfigResources
+	for i, res := range resources {
+		cRes[i] = C.rd_kafka_ConfigResource_new(
+			C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name))
+		if cRes[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Invalid arguments for resource %v", res))
+		}
+
+		defer C.rd_kafka_ConfigResource_destroy(cRes[i])
+
+		for _, entry := range res.Config {
+			var cErr C.rd_kafka_resp_err_t
+			switch entry.Operation {
+			case AlterOperationSet:
+				cErr = C.rd_kafka_ConfigResource_set_config(
+					cRes[i], C.CString(entry.Name), C.CString(entry.Value))
+			default:
+				panic(fmt.Sprintf("Invalid ConfigEntry.Operation: %v", entry.Operation))
+			}
+
+			if cErr != 0 {
+				return nil,
+					newCErrorFromString(cErr,
+						fmt.Sprintf("Failed to add configuration %s: %s",
+							entry, C.GoString(C.rd_kafka_err2str(cErr))))
+			}
+		}
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_ALTERCONFIGS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_AlterConfigs(
+		a.handle.rk,
+		(**C.rd_kafka_ConfigResource_t)(&cRes[0]),
+		C.size_t(len(cRes)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_ALTERCONFIGS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cResult := C.rd_kafka_event_AlterConfigs_result(rkev)
+
+	// Convert results from C to Go
+	var cCnt C.size_t
+	cResults := C.rd_kafka_AlterConfigs_result_resources(cResult, &cCnt)
+
+	return a.cConfigResourceToResult(cResults, cCnt)
+}
+
+// DescribeConfigs retrieves configuration for cluster resources.
+//
+// The returned configuration includes default values; use
+// ConfigEntryResult.Source to distinguish default values
+// from manually configured settings.
+//
+// The value of config entries where .IsSensitive is true
+// is always unset to avoid disclosing sensitive
+// information, such as security settings.
+//
+// Configuration entries where .IsReadOnly is true can't be modified
+// (with AlterConfigs).
+//
+// Synonym configuration entries are returned if the broker supports
+// it (broker version >= 1.1.0). See .Synonyms.
+//
+// Requires broker version >= 0.11.0.0.
+//
+// Multiple resources and resource types may be requested, but at most
+// one resource of type ResourceBroker is allowed per call
+// since these resource requests must be sent to the broker specified
+// in the resource.
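+//
+// A minimal usage sketch, assuming an AdminClient a, a context ctx and the
+// package's ResourceTopic resource type (the resource name is illustrative):
+//
+//   results, err := a.DescribeConfigs(ctx, []ConfigResource{
+//       {Type: ResourceTopic, Name: "mytopic"},
+//   })
+//   for _, res := range results {
+//       for _, entry := range res.Config {
+//           fmt.Printf("%s = %q (source %v)\n", entry.Name, entry.Value, entry.Source)
+//       }
+//   }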
+func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) {
+	cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go ConfigResources to C ConfigResources
+	for i, res := range resources {
+		cRes[i] = C.rd_kafka_ConfigResource_new(
+			C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name))
+		if cRes[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Invalid arguments for resource %v", res))
+		}
+
+		defer C.rd_kafka_ConfigResource_destroy(cRes[i])
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_DescribeConfigs(
+		a.handle.rk,
+		(**C.rd_kafka_ConfigResource_t)(&cRes[0]),
+		C.size_t(len(cRes)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cResult := C.rd_kafka_event_DescribeConfigs_result(rkev)
+
+	// Convert results from C to Go
+	var cCnt C.size_t
+	cResults := C.rd_kafka_DescribeConfigs_result_resources(cResult, &cCnt)
+
+	return a.cConfigResourceToResult(cResults, cCnt)
+}
+
+// GetMetadata queries the broker for cluster and topic metadata.
+// If topic is non-nil, only information about that topic is returned; else if
+// allTopics is false, only information about locally used topics is returned;
+// else information about all topics is returned.
+// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
+func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
+	return getMetadata(a, topic, allTopics, timeoutMs)
+}
+
+// String returns a human-readable name for an AdminClient instance.
+func (a *AdminClient) String() string {
+	return fmt.Sprintf("admin-%s", a.handle.String())
+}
+
+// gethandle implements the Handle interface.
+func (a *AdminClient) gethandle() *handle {
+	return a.handle
+}
+
+// Close an AdminClient instance.
+func (a *AdminClient) Close() {
+	if a.isDerived {
+		// Derived AdminClient needs no cleanup.
+		a.handle = &handle{}
+		return
+	}
+
+	a.handle.cleanup()
+
+	C.rd_kafka_destroy(a.handle.rk)
+}
+
+// NewAdminClient creates a new AdminClient instance with a new underlying client instance.
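+//
+// A minimal usage sketch (the broker address is a placeholder):
+//
+//   a, err := NewAdminClient(&ConfigMap{"bootstrap.servers": "localhost:9092"})
+//   if err != nil {
+//       // handle error
+//   }
+//   defer a.Close()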
+func NewAdminClient(conf *ConfigMap) (*AdminClient, error) {
+
+	err := versionCheck()
+	if err != nil {
+		return nil, err
+	}
+
+	a := &AdminClient{}
+	a.handle = &handle{}
+
+	// Convert ConfigMap to librdkafka conf_t
+	cConf, err := conf.convert()
+	if err != nil {
+		return nil, err
+	}
+
+	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Create librdkafka producer instance. The Producer is somewhat cheaper than
+	// the consumer, but any instance type can be used for Admin APIs.
+	a.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
+	if a.handle.rk == nil {
+		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+	}
+
+	a.isDerived = false
+	a.handle.setup()
+
+	return a, nil
+}
+
+// NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance.
+// The AdminClient will use the same configuration and connections as the parent instance.
+func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error) {
+	if p.handle.rk == nil {
+		return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed producer")
+	}
+
+	a = &AdminClient{}
+	a.handle = &p.handle
+	a.isDerived = true
+	return a, nil
+}
+
+// NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance.
+// The AdminClient will use the same configuration and connections as the parent instance.
+func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error) {
+	if c.handle.rk == nil {
+		return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed consumer")
+	}
+
+	a = &AdminClient{}
+	a.handle = &c.handle
+	a.isDerived = true
+	return a, nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go
new file mode 100644
index 0000000..19a8be0
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go
@@ -0,0 +1,264 @@
+/**
+ * Copyright 2018 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"fmt"
+	"time"
+	"unsafe"
+)
+
+/*
+#include <librdkafka/rdkafka.h>
+#include <stdlib.h>
+*/
+import "C"
+
+// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
+// timeout for CreateTopics to complete the creation of topics on the controller
+// before returning a result to the application.
+//
+// CreateTopics, DeleteTopics, CreatePartitions:
+// a value of 0 returns immediately after triggering topic
+// creation, while a value > 0 waits this long for the creation to
+// propagate in the cluster.
+//
+// Default: 0 (return immediately).
+//
+// Valid for CreateTopics, DeleteTopics, CreatePartitions.
+type AdminOptionOperationTimeout struct {
+	isSet bool
+	val   time.Duration
+}
+
+func (ao AdminOptionOperationTimeout) supportsCreateTopics() {
+}
+func (ao AdminOptionOperationTimeout) supportsDeleteTopics() {
+}
+func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {
+}
+
+func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
+	if !ao.isSet {
+		return nil
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
+		cOptions, C.int(durationToMilliseconds(ao.val)),
+		cErrstr, cErrstrSize)
+	if cErr != 0 {
+		C.rd_kafka_AdminOptions_destroy(cOptions)
+		return newCErrorFromString(cErr,
+			fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
+
+	}
+
+	return nil
+}
+
+// SetAdminOperationTimeout sets the broker's operation timeout, such as the
+// timeout for CreateTopics to complete the creation of topics on the controller
+// before returning a result to the application.
+//
+// CreateTopics, DeleteTopics, CreatePartitions:
+// a value of 0 returns immediately after triggering topic
+// creation, while a value > 0 waits this long for the creation to
+// propagate in the cluster.
+//
+// Default: 0 (return immediately).
+//
+// Valid for CreateTopics, DeleteTopics, CreatePartitions.
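+//
+// A minimal usage sketch, assuming an AdminClient a, a context ctx and a
+// prepared topics slice (the timeout value is illustrative):
+//
+//   results, err := a.CreateTopics(ctx, topics,
+//       SetAdminOperationTimeout(60*time.Second))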
+func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) {
+	ao.isSet = true
+	ao.val = t
+	return ao
+}
+
+// AdminOptionRequestTimeout sets the overall request timeout, including broker
+// lookup, request transmission, operation time on broker, and response.
+//
+// Default: `socket.timeout.ms`.
+//
+// Valid for all Admin API methods.
+type AdminOptionRequestTimeout struct {
+	isSet bool
+	val   time.Duration
+}
+
+func (ao AdminOptionRequestTimeout) supportsCreateTopics() {
+}
+func (ao AdminOptionRequestTimeout) supportsDeleteTopics() {
+}
+func (ao AdminOptionRequestTimeout) supportsCreatePartitions() {
+}
+func (ao AdminOptionRequestTimeout) supportsAlterConfigs() {
+}
+func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() {
+}
+
+func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
+	if !ao.isSet {
+		return nil
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cErr := C.rd_kafka_AdminOptions_set_request_timeout(
+		cOptions, C.int(durationToMilliseconds(ao.val)),
+		cErrstr, cErrstrSize)
+	if cErr != 0 {
+		C.rd_kafka_AdminOptions_destroy(cOptions)
+		return newCErrorFromString(cErr, C.GoString(cErrstr))
+	}
+
+	return nil
+}
+
+// SetAdminRequestTimeout sets the overall request timeout, including broker
+// lookup, request transmission, operation time on broker, and response.
+//
+// Default: `socket.timeout.ms`.
+//
+// Valid for all Admin API methods.
+func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) {
+	ao.isSet = true
+	ao.val = t
+	return ao
+}
+
+// AdminOptionValidateOnly tells the broker to only validate the request,
+// without performing the requested operation (create topics, etc).
+//
+// Default: false.
+//
+// Valid for CreateTopics, CreatePartitions, AlterConfigs
+type AdminOptionValidateOnly struct {
+	isSet bool
+	val   bool
+}
+
+func (ao AdminOptionValidateOnly) supportsCreateTopics() {
+}
+func (ao AdminOptionValidateOnly) supportsCreatePartitions() {
+}
+func (ao AdminOptionValidateOnly) supportsAlterConfigs() {
+}
+
+func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
+	if !ao.isSet {
+		return nil
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cErr := C.rd_kafka_AdminOptions_set_validate_only(
+		cOptions, bool2cint(ao.val),
+		cErrstr, cErrstrSize)
+	if cErr != 0 {
+		C.rd_kafka_AdminOptions_destroy(cOptions)
+		return newCErrorFromString(cErr, C.GoString(cErrstr))
+	}
+
+	return nil
+}
+
+// SetAdminValidateOnly tells the broker to only validate the request,
+// without performing the requested operation (create topics, etc).
+//
+// Default: false.
+//
+// Valid for CreateTopics, CreatePartitions, AlterConfigs.
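+//
+// A minimal usage sketch, assuming an AdminClient a, a context ctx and a
+// prepared topics slice; the request is validated by the broker without
+// creating any topics:
+//
+//   results, err := a.CreateTopics(ctx, topics, SetAdminValidateOnly(true))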
+func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) {
+	ao.isSet = true
+	ao.val = validateOnly
+	return ao
+}
+
+// CreateTopicsAdminOption - see setters.
+//
+// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
+type CreateTopicsAdminOption interface {
+	supportsCreateTopics()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// DeleteTopicsAdminOption - see setters.
+//
+// See SetAdminRequestTimeout, SetAdminOperationTimeout.
+type DeleteTopicsAdminOption interface {
+	supportsDeleteTopics()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// CreatePartitionsAdminOption - see setters.
+//
+// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.
+type CreatePartitionsAdminOption interface {
+	supportsCreatePartitions()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// AlterConfigsAdminOption - see setters.
+//
+// See SetAdminRequestTimeout, SetAdminValidateOnly.
+type AlterConfigsAdminOption interface {
+	supportsAlterConfigs()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// DescribeConfigsAdminOption - see setters.
+//
+// See SetAdminRequestTimeout.
+type DescribeConfigsAdminOption interface {
+	supportsDescribeConfigs()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// AdminOption is a generic type not to be used directly.
+//
+// See CreateTopicsAdminOption et al.
+type AdminOption interface {
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
+
+	cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		err := opt.apply(cOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cOptions, nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html
new file mode 100644
index 0000000..05c8fed
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html
@@ -0,0 +1,1632 @@
+<!DOCTYPE html>
+<html>
+ <head>
+  <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
+  <meta content="width=device-width, initial-scale=1" name="viewport">
+  <meta content="#375EAB" name="theme-color">
+  <title>
+   kafka - The Go Programming Language
+  </title>
+  <link href="http://golang.org/lib/godoc/style.css" rel="stylesheet" type="text/css">
+  <link href="http://golang.org/lib/godoc/jquery.treeview.css" rel="stylesheet">
+  <script type="text/javascript">
+   window.initFuncs = [];
+  </script>
+ </head>
+ <body>
+  <div id="lowframe" style="position: fixed; bottom: 0; left: 0; height: 0; width: 100%; border-top: thin solid grey; background-color: white; overflow: auto;">
+   ...
+  </div>
+  <!-- #lowframe -->
+  <div class="wide" id="page">
+   <div class="container">
+    <h1>
+     Package kafka
+    </h1>
+    <div id="nav">
+    </div>
+    <!--
+	Copyright 2009 The Go Authors. All rights reserved.
+	Use of this source code is governed by a BSD-style
+	license that can be found in the LICENSE file.
+-->
+    <!--
+	Note: Static (i.e., not template-generated) href and id
+	attributes start with "pkg-" to make it impossible for
+	them to conflict with generated attributes (some of which
+	correspond to Go identifiers).
+-->
+    <script type="text/javascript">
+     document.ANALYSIS_DATA = null;
+	document.CALLGRAPH = null;
+    </script>
+    <div id="short-nav">
+     <dl>
+      <dd>
+       <code>
+        import "github.com/confluentinc/confluent-kafka-go/kafka"
+       </code>
+      </dd>
+     </dl>
+     <dl>
+      <dd>
+       <a class="overviewLink" href="#pkg-overview">
+        Overview
+       </a>
+      </dd>
+      <dd>
+       <a class="indexLink" href="#pkg-index">
+        Index
+       </a>
+      </dd>
+      <dd>
+      </dd>
+     </dl>
+    </div>
+    <!-- The package's Name is printed as title by the top-level template -->
+    <div class="toggleVisible" id="pkg-overview">
+     <div class="collapsed">
+      <h2 class="toggleButton" title="Click to show Overview section">
+       Overview ▹
+      </h2>
+     </div>
+     <div class="expanded">
+      <h2 class="toggleButton" title="Click to hide Overview section">
+       Overview ▾
+      </h2>
+      <p>
+       Package kafka provides high-level Apache Kafka producer and consumer
+implementations using bindings on top of the librdkafka C library.
+      </p>
+      <h3 id="hdr-High_level_Consumer">
+       High-level Consumer
+      </h3>
+      <p>
+       * Decide if you want to read messages and events from the `.Events()` channel
+(set `"go.events.channel.enable": true`) or by calling `.Poll()`.
+      </p>
+      <p>
+       * Create a Consumer with `kafka.NewConsumer()` providing at
+least the `bootstrap.servers` and `group.id` configuration properties.
+      </p>
+      <p>
+       * Call `.Subscribe()` or `.SubscribeTopics()` (to subscribe to multiple topics)
+to join the group with the specified subscription set.
+Subscriptions are atomic; calling `.Subscribe*()` again will leave
+the group and rejoin with the new set of topics.
+      </p>
+      <p>
+       * Start reading events and messages from either the `.Events` channel
+or by calling `.Poll()`.
+      </p>
+      <p>
+       * When the group has rebalanced each client member is assigned a
+(sub-)set of topic+partitions.
+By default the consumer will start fetching messages for its assigned
+partitions at this point, but your application may enable rebalance
+events to get an insight into what the assigned partitions were
+as well as set the initial offsets. To do this you need to pass
+`"go.application.rebalance.enable": true` to the `NewConsumer()` call
+mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
+with the assigned partition set. You can optionally modify the initial
+offsets (they'll default to stored offsets and if there are no previously stored
+offsets it will fall back to `"default.topic.config": ConfigMap{"auto.offset.reset": ..}`
+which defaults to the `latest` message) and then call `.Assign(partitions)`
+to start consuming. If you don't need to modify the initial offsets you will
+not need to call `.Assign()`; the client will do so automatically for you if
+you don't.
+      </p>
+      <p>
+       * As messages are fetched they will be made available on either the
+`.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`.
+      </p>
+      <p>
+       * Handle messages, events and errors to your liking.
+      </p>
+      <p>
+       * When you are done consuming call `.Close()` to commit final offsets
+and leave the consumer group.
+      </p>
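+      <p>
+       Putting these steps together, a minimal poll-based consumer might look
+like the sketch below (the broker address, group id and topic name are
+placeholders):
+      </p>
+      <pre>c, err := kafka.NewConsumer(&amp;kafka.ConfigMap{
+    "bootstrap.servers": "localhost:9092",
+    "group.id":          "example-group",
+})
+if err != nil {
+    panic(err)
+}
+defer c.Close()
+
+c.SubscribeTopics([]string{"example-topic"}, nil)
+for {
+    switch e := c.Poll(100).(type) {
+    case *kafka.Message:
+        fmt.Printf("%s: %s\n", e.TopicPartition, string(e.Value))
+    case kafka.Error:
+        fmt.Println("error:", e)
+    }
+}</pre>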
+      <h3 id="hdr-Producer">
+       Producer
+      </h3>
+      <p>
+       * Create a Producer with `kafka.NewProducer()` providing at least
+the `bootstrap.servers` configuration properties.
+      </p>
+      <p>
+       * Messages may now be produced either by sending a `*kafka.Message`
+on the `.ProduceChannel` or by calling `.Produce()`.
+      </p>
+      <p>
+       * Producing is an asynchronous operation so the client notifies the application
+of per-message produce success or failure through something called delivery reports.
+Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
+and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
+was successfully delivered or not.
+It is also possible to direct delivery reports to alternate channels
+by providing a non-nil `chan Event` channel to `.Produce()`.
+If no delivery reports are wanted they can be completely disabled by
+setting configuration property `"go.delivery.reports": false`.
+      </p>
+      <p>
+       * When you are done producing messages you will need to make sure all messages
+are indeed delivered to the broker (or failed), remember that this is
+an asynchronous client so some of your messages may be lingering in internal
+channels or transmission queues.
+To do this you can either keep track of the messages you've produced
+and wait for their corresponding delivery reports, or call the convenience
+function `.Flush()` that will block until all message deliveries are done
+or the provided timeout elapses.
+      </p>
+      <p>
+       * Finally call `.Close()` to decommission the producer.
+      </p>
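+      <p>
+       A corresponding minimal producer sketch (the broker address and topic
+name are placeholders):
+      </p>
+      <pre>p, err := kafka.NewProducer(&amp;kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
+if err != nil {
+    panic(err)
+}
+defer p.Close()
+
+topic := "example-topic"
+p.Produce(&amp;kafka.Message{
+    TopicPartition: kafka.TopicPartition{Topic: &amp;topic, Partition: kafka.PartitionAny},
+    Value:          []byte("hello"),
+}, nil)
+
+// Wait for any outstanding delivery reports before shutting down.
+p.Flush(15 * 1000)</pre>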
+      <h3 id="hdr-Events">
+       Events
+      </h3>
+      <p>
+       Apart from emitting messages and delivery reports the client also communicates
+with the application through a number of different event types.
+An application may choose to handle or ignore these events.
+      </p>
+      <h3 id="hdr-Consumer_events">
+       Consumer events
+      </h3>
+      <p>
+       * `*kafka.Message` - a fetched message.
+      </p>
+      <p>
+       * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
+Requires `go.application.rebalance.enable`
+      </p>
+      <p>
+       * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance.
+`AssignedPartitions` and `RevokedPartitions` are symmetrical.
+Requires `go.application.rebalance.enable`
+      </p>
+      <p>
+       * `PartitionEOF` - Consumer has reached the end of a partition.
+NOTE: The consumer will keep trying to fetch new messages for the partition.
+      </p>
+      <p>
+       * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
+      </p>
+      <h3 id="hdr-Producer_events">
+       Producer events
+      </h3>
+      <p>
+       * `*kafka.Message` - delivery report for produced message.
+Check `.TopicPartition.Error` for delivery result.
+      </p>
+      <h3 id="hdr-Generic_events_for_both_Consumer_and_Producer">
+       Generic events for both Consumer and Producer
+      </h3>
+      <p>
+       * `KafkaError` - client (error codes are prefixed with _) or broker error.
+These errors are normally just informational since the
+client will try its best to automatically recover (eventually).
+      </p>
+      <p>
+       Hint: If your application registers a signal notification
+(signal.Notify), make sure the signals channel is buffered to avoid
+possible complications with blocking Poll() calls.
+      </p>
+     </div>
+    </div>
+    <div class="toggleVisible" id="pkg-index">
+     <div class="collapsed">
+      <h2 class="toggleButton" title="Click to show Index section">
+       Index ▹
+      </h2>
+     </div>
+     <div class="expanded">
+      <h2 class="toggleButton" title="Click to hide Index section">
+       Index ▾
+      </h2>
+      <!-- Table of contents for API; must be named manual-nav to turn off auto nav. -->
+      <div id="manual-nav">
+       <dl>
+        <dd>
+         <a href="#pkg-constants">
+          Constants
+         </a>
+        </dd>
+        <dd>
+         <a href="#LibraryVersion">
+          func LibraryVersion() (int, string)
+         </a>
+        </dd>
+        <dd>
+         <a href="#AssignedPartitions">
+          type AssignedPartitions
+         </a>
+        </dd>
+        <dd>
+         <a href="#AssignedPartitions.String">
+          func (e AssignedPartitions) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#BrokerMetadata">
+          type BrokerMetadata
+         </a>
+        </dd>
+        <dd>
+         <a href="#ConfigMap">
+          type ConfigMap
+         </a>
+        </dd>
+        <dd>
+         <a href="#ConfigMap.Set">
+          func (m ConfigMap) Set(kv string) error
+         </a>
+        </dd>
+        <dd>
+         <a href="#ConfigMap.SetKey">
+          func (m ConfigMap) SetKey(key string, value ConfigValue) error
+         </a>
+        </dd>
+        <dd>
+         <a href="#ConfigValue">
+          type ConfigValue
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer">
+          type Consumer
+         </a>
+        </dd>
+        <dd>
+         <a href="#NewConsumer">
+          func NewConsumer(conf *ConfigMap) (*Consumer, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Assign">
+          func (c *Consumer) Assign(partitions []TopicPartition) (err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Close">
+          func (c *Consumer) Close() (err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Commit">
+          func (c *Consumer) Commit() ([]TopicPartition, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.CommitMessage">
+          func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.CommitOffsets">
+          func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Events">
+          func (c *Consumer) Events() chan Event
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.GetMetadata">
+          func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Poll">
+          func (c *Consumer) Poll(timeoutMs int) (event Event)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.QueryWatermarkOffsets">
+          func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.String">
+          func (c *Consumer) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Subscribe">
+          func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.SubscribeTopics">
+          func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Unassign">
+          func (c *Consumer) Unassign() (err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Consumer.Unsubscribe">
+          func (c *Consumer) Unsubscribe() (err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Error">
+          type Error
+         </a>
+        </dd>
+        <dd>
+         <a href="#Error.Code">
+          func (e Error) Code() ErrorCode
+         </a>
+        </dd>
+        <dd>
+         <a href="#Error.Error">
+          func (e Error) Error() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#Error.String">
+          func (e Error) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#ErrorCode">
+          type ErrorCode
+         </a>
+        </dd>
+        <dd>
+         <a href="#ErrorCode.String">
+          func (c ErrorCode) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#Event">
+          type Event
+         </a>
+        </dd>
+        <dd>
+         <a href="#Handle">
+          type Handle
+         </a>
+        </dd>
+        <dd>
+         <a href="#Message">
+          type Message
+         </a>
+        </dd>
+        <dd>
+         <a href="#Message.String">
+          func (m *Message) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#Metadata">
+          type Metadata
+         </a>
+        </dd>
+        <dd>
+         <a href="#Offset">
+          type Offset
+         </a>
+        </dd>
+        <dd>
+         <a href="#NewOffset">
+          func NewOffset(offset interface{}) (Offset, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#OffsetTail">
+          func OffsetTail(relativeOffset Offset) Offset
+         </a>
+        </dd>
+        <dd>
+         <a href="#Offset.Set">
+          func (o Offset) Set(offset interface{}) error
+         </a>
+        </dd>
+        <dd>
+         <a href="#Offset.String">
+          func (o Offset) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#OffsetsCommitted">
+          type OffsetsCommitted
+         </a>
+        </dd>
+        <dd>
+         <a href="#OffsetsCommitted.String">
+          func (o OffsetsCommitted) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#PartitionEOF">
+          type PartitionEOF
+         </a>
+        </dd>
+        <dd>
+         <a href="#PartitionEOF.String">
+          func (p PartitionEOF) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#PartitionMetadata">
+          type PartitionMetadata
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer">
+          type Producer
+         </a>
+        </dd>
+        <dd>
+         <a href="#NewProducer">
+          func NewProducer(conf *ConfigMap) (*Producer, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.Close">
+          func (p *Producer) Close()
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.Events">
+          func (p *Producer) Events() chan Event
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.Flush">
+          func (p *Producer) Flush(timeoutMs int) int
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.GetMetadata">
+          func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.Len">
+          func (p *Producer) Len() int
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.Produce">
+          func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.ProduceChannel">
+          func (p *Producer) ProduceChannel() chan *Message
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.QueryWatermarkOffsets">
+          func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+         </a>
+        </dd>
+        <dd>
+         <a href="#Producer.String">
+          func (p *Producer) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#RebalanceCb">
+          type RebalanceCb
+         </a>
+        </dd>
+        <dd>
+         <a href="#RevokedPartitions">
+          type RevokedPartitions
+         </a>
+        </dd>
+        <dd>
+         <a href="#RevokedPartitions.String">
+          func (e RevokedPartitions) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#TimestampType">
+          type TimestampType
+         </a>
+        </dd>
+        <dd>
+         <a href="#TimestampType.String">
+          func (t TimestampType) String() string
+         </a>
+        </dd>
+        <dd>
+         <a href="#TopicMetadata">
+          type TopicMetadata
+         </a>
+        </dd>
+        <dd>
+         <a href="#TopicPartition">
+          type TopicPartition
+         </a>
+        </dd>
+        <dd>
+         <a href="#TopicPartition.String">
+          func (p TopicPartition) String() string
+         </a>
+        </dd>
+       </dl>
+      </div>
+      <!-- #manual-nav -->
+      <h4>
+       Package files
+      </h4>
+      <p>
+       <span style="font-size:90%">
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go">
+         build_dynamic.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/config.go">
+         config.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go">
+         consumer.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/error.go">
+         error.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go">
+         event.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go">
+         generated_errors.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/handle.go">
+         handle.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go">
+         kafka.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/message.go">
+         message.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go">
+         metadata.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/misc.go">
+         misc.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go">
+         producer.go
+        </a>
+        <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go">
+         testhelpers.go
+        </a>
+       </span>
+      </p>
+     </div>
+     <!-- .expanded -->
+    </div>
+    <!-- #pkg-index -->
+    <div class="toggle" id="pkg-callgraph" style="display: none">
+     <div class="collapsed">
+      <h2 class="toggleButton" title="Click to show Internal Call Graph section">
+       Internal call graph ▹
+      </h2>
+     </div>
+     <!-- .expanded -->
+     <div class="expanded">
+      <h2 class="toggleButton" title="Click to hide Internal Call Graph section">
+       Internal call graph ▾
+      </h2>
+      <p>
+       In the call graph viewer below, each node
+			  is a function belonging to this package
+			  and its children are the functions it
+			  calls—perhaps dynamically.
+      </p>
+      <p>
+       The root nodes are the entry points of the
+			  package: functions that may be called from
+			  outside the package.
+			  There may be non-exported or anonymous
+			  functions among them if they are called
+			  dynamically from another package.
+      </p>
+      <p>
+       Click a node to visit that function's source code.
+			  From there you can visit its callers by
+			  clicking its declaring
+       <code>
+        func
+       </code>
+       token.
+      </p>
+      <p>
+       Functions may be omitted if they were
+			  determined to be unreachable in the
+			  particular programs or tests that were
+			  analyzed.
+      </p>
+      <!-- Zero means show all package entry points. -->
+      <ul class="treeview" id="callgraph-0" style="margin-left: 0.5in">
+      </ul>
+     </div>
+    </div>
+    <!-- #pkg-callgraph -->
+    <h2 id="pkg-constants">
+     Constants
+    </h2>
+    <pre>const (
+    <span class="comment">// TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support</span>
+    <span id="TimestampNotAvailable">TimestampNotAvailable</span> = <a href="#TimestampType">TimestampType</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_TIMESTAMP_NOT_AVAILABLE">RD_KAFKA_TIMESTAMP_NOT_AVAILABLE</a>)
+    <span class="comment">// TimestampCreateTime indicates timestamp set by producer (source time)</span>
+    <span id="TimestampCreateTime">TimestampCreateTime</span> = <a href="#TimestampType">TimestampType</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_TIMESTAMP_CREATE_TIME">RD_KAFKA_TIMESTAMP_CREATE_TIME</a>)
+    <span class="comment">// TimestampLogAppendTime indicates timestamp set set by broker (store time)</span>
+    <span id="TimestampLogAppendTime">TimestampLogAppendTime</span> = <a href="#TimestampType">TimestampType</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME">RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME</a>)
+)</pre>
+    <pre>const <span id="OffsetBeginning">OffsetBeginning</span> = <a href="#Offset">Offset</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_OFFSET_BEGINNING">RD_KAFKA_OFFSET_BEGINNING</a>)</pre>
+    <p>
+     Earliest offset (logical)
+    </p>
+    <pre>const <span id="OffsetEnd">OffsetEnd</span> = <a href="#Offset">Offset</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_OFFSET_END">RD_KAFKA_OFFSET_END</a>)</pre>
+    <p>
+     Latest offset (logical)
+    </p>
+    <pre>const <span id="OffsetInvalid">OffsetInvalid</span> = <a href="#Offset">Offset</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_OFFSET_INVALID">RD_KAFKA_OFFSET_INVALID</a>)</pre>
+    <p>
+     Invalid/unspecified offset
+    </p>
+    <pre>const <span id="OffsetStored">OffsetStored</span> = <a href="#Offset">Offset</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_OFFSET_STORED">RD_KAFKA_OFFSET_STORED</a>)</pre>
+    <p>
+     Use stored offset
+    </p>
+    <pre>const <span id="PartitionAny">PartitionAny</span> = <a href="http://golang.org/pkg/builtin/#int32">int32</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_PARTITION_UA">RD_KAFKA_PARTITION_UA</a>)</pre>
+    <p>
+     Any partition (for partitioning), or unspecified value (for all other cases)
+    </p>
+    <h2 id="LibraryVersion">
+     func
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=10095:10130#L292">
+      LibraryVersion
+     </a>
+    </h2>
+    <pre>func LibraryVersion() (<a href="http://golang.org/pkg/builtin/#int">int</a>, <a href="http://golang.org/pkg/builtin/#string">string</a>)</pre>
+    <p>
+     LibraryVersion returns the underlying librdkafka library version as a
+(version_int, version_str) tuple.
+    </p>
+    <h2 id="AssignedPartitions">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=1621:1684#L49">
+      AssignedPartitions
+     </a>
+    </h2>
+    <pre>type AssignedPartitions struct {
+    Partitions []<a href="#TopicPartition">TopicPartition</a>
+}</pre>
+    <p>
+     AssignedPartitions consumer group rebalance event: assigned partition set
+    </p>
+    <h3 id="AssignedPartitions.String">
+     func (AssignedPartitions)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=1686:1729#L53">
+      String
+     </a>
+    </h3>
+    <pre>func (e <a href="#AssignedPartitions">AssignedPartitions</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <h2 id="BrokerMetadata">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go?s=1266:1331#L37">
+      BrokerMetadata
+     </a>
+    </h2>
+    <pre>type BrokerMetadata struct {
+    ID   <a href="http://golang.org/pkg/builtin/#int32">int32</a>
+    Host <a href="http://golang.org/pkg/builtin/#string">string</a>
+    Port <a href="http://golang.org/pkg/builtin/#int">int</a>
+}</pre>
+    <p>
+     BrokerMetadata contains per-broker metadata
+    </p>
+    <h2 id="ConfigMap">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/config.go?s=1172:1209#L31">
+      ConfigMap
+     </a>
+    </h2>
+    <pre>type ConfigMap map[<a href="http://golang.org/pkg/builtin/#string">string</a>]<a href="#ConfigValue">ConfigValue</a></pre>
+    <p>
+     ConfigMap is a map containing standard librdkafka configuration properties as documented in:
+     <a href="https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md">
+      https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md
+     </a>
+    </p>
+    <p>
+     The special property "default.topic.config" (optional) is a ConfigMap containing default topic
+configuration properties.
+    </p>
+    <h3 id="ConfigMap.Set">
+     func (ConfigMap)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/config.go?s=1813:1852#L52">
+      Set
+     </a>
+    </h3>
+    <pre>func (m <a href="#ConfigMap">ConfigMap</a>) Set(kv <a href="http://golang.org/pkg/builtin/#string">string</a>) <a href="http://golang.org/pkg/builtin/#error">error</a></pre>
+    <p>
+     Set implements flag.Set (command line argument parser) as a convenience
+for `-X key=value` config.
+    </p>
+    <h3 id="ConfigMap.SetKey">
+     func (ConfigMap)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/config.go?s=1370:1432#L36">
+      SetKey
+     </a>
+    </h3>
+    <pre>func (m <a href="#ConfigMap">ConfigMap</a>) SetKey(key <a href="http://golang.org/pkg/builtin/#string">string</a>, value <a href="#ConfigValue">ConfigValue</a>) <a href="http://golang.org/pkg/builtin/#error">error</a></pre>
+    <p>
+     SetKey sets configuration property key to value.
+For user convenience a key prefixed with {topic}. will be
+set on the "default.topic.config" sub-map.
+    </p>
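+    <p>
+     For example (fragment; the error return is ignored for brevity):
+    </p>
+    <pre>m := kafka.ConfigMap{}
+// The {topic}. prefix routes the key into the "default.topic.config" sub-map,
+// i.e. it is stored under m["default.topic.config"].(kafka.ConfigMap).
+_ = m.SetKey("{topic}.auto.offset.reset", "earliest")
+</pre>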
+    <h2 id="ConfigValue">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/config.go?s=846:874#L24">
+      ConfigValue
+     </a>
+    </h2>
+    <pre>type ConfigValue interface{}</pre>
+    <p>
+     ConfigValue supports the following types:
+    </p>
+    <pre>bool, int, string, any type with the standard String() interface
+</pre>
+    <h2 id="Consumer">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=968:1205#L25">
+      Consumer
+     </a>
+    </h2>
+    <pre>type Consumer struct {
+    <span class="comment">// contains filtered or unexported fields</span>
+}</pre>
+    <p>
+     Consumer implements a High-level Apache Kafka Consumer instance
+    </p>
+    <h3 id="NewConsumer">
+     func
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=7696:7748#L242">
+      NewConsumer
+     </a>
+    </h3>
+    <pre>func NewConsumer(conf *<a href="#ConfigMap">ConfigMap</a>) (*<a href="#Consumer">Consumer</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     NewConsumer creates a new high-level Consumer instance.
+    </p>
+    <p>
+     Supported special configuration properties:
+    </p>
+    <pre>go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
+                                     If set to true the app must handle the AssignedPartitions and
+                                     RevokedPartitions events and call Assign() and Unassign()
+                                     respectively.
+go.events.channel.enable (bool, false) - Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. (Experimental)
+go.events.channel.size (int, 1000) - Events() channel size
+</pre>
+    <p>
+     WARNING: Because channels (and queues in general) buffer their contents, the
+use of the events channel risks delivering outdated events and
+messages. Minimizing go.events.channel.size reduces the risk
+and number of outdated events and messages but does not eliminate
+it completely. With a channel size of 1, at most one
+event or message may be outdated.
+    </p>
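+    <p>
+     A minimal creation sketch for the channel-based mode described above (the
+broker address and group name are placeholders):
+    </p>
+    <pre>c, err := kafka.NewConsumer(&amp;kafka.ConfigMap{
+    "bootstrap.servers":               "localhost:9092", // placeholder
+    "group.id":                        "example-group",  // placeholder
+    "go.application.rebalance.enable": true, // app must call Assign()/Unassign()
+    "go.events.channel.enable":        true, // use Events(), disables Poll()
+    "go.events.channel.size":          10,   // small buffer limits outdated events
+})
+if err != nil {
+    panic(err) // sketch only; report the error properly in real code
+}
+defer c.Close()</pre>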
+    <h3 id="Consumer.Assign">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=2641:2707#L82">
+      Assign
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Assign(partitions []<a href="#TopicPartition">TopicPartition</a>) (err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     Assign an atomic set of partitions to consume.
+This replaces the current assignment.
+    </p>
+    <h3 id="Consumer.Close">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=6121:6159#L202">
+      Close
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Close() (err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     Close Consumer instance.
+The object is no longer usable after this call.
+    </p>
+    <h3 id="Consumer.Commit">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=4853:4906#L159">
+      Commit
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Commit() ([]<a href="#TopicPartition">TopicPartition</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     Commit offsets for currently assigned partitions.
+This is a blocking call.
+Returns the committed offsets on success.
+    </p>
+    <h3 id="Consumer.CommitMessage">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=5070:5140#L166">
+      CommitMessage
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) CommitMessage(m *<a href="#Message">Message</a>) ([]<a href="#TopicPartition">TopicPartition</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     CommitMessage commits offset based on the provided message.
+This is a blocking call.
+Returns the committed offsets on success.
+    </p>
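+    <p>
+     For at-least-once processing (fragment; c is a *Consumer and process is a
+hypothetical application handler), commit only after the message is handled:
+    </p>
+    <pre>e := c.Poll(1000)
+if m, ok := e.(*kafka.Message); ok &amp;&amp; m.TopicPartition.Error == nil {
+    process(m) // hypothetical handler
+    if _, err := c.CommitMessage(m); err != nil {
+        fmt.Fprintf(os.Stderr, "commit failed: %v\n", err)
+    }
+}</pre>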
+    <h3 id="Consumer.CommitOffsets">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=5473:5557#L178">
+      CommitOffsets
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) CommitOffsets(offsets []<a href="#TopicPartition">TopicPartition</a>) ([]<a href="#TopicPartition">TopicPartition</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     CommitOffsets commits the provided list of offsets.
+This is a blocking call.
+Returns the committed offsets on success.
+    </p>
+    <h3 id="Consumer.Events">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=5981:6019#L196">
+      Events
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Events() chan <a href="#Event">Event</a></pre>
+    <p>
+     Events returns the Events channel (if enabled)
+    </p>
+    <h3 id="Consumer.GetMetadata">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=10490:10585#L347">
+      GetMetadata
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) GetMetadata(topic *<a href="http://golang.org/pkg/builtin/#string">string</a>, allTopics <a href="http://golang.org/pkg/builtin/#bool">bool</a>, timeoutMs <a href="http://golang.org/pkg/builtin/#int">int</a>) (*<a href="#Metadata">Metadata</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     GetMetadata queries broker for cluster and topic metadata.
+If topic is non-nil only information about that topic is returned, else if
+allTopics is false only information about locally used topics is returned,
+else information about all topics is returned.
+    </p>
+    <h3 id="Consumer.Poll">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=5809:5861#L190">
+      Poll
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Poll(timeoutMs <a href="http://golang.org/pkg/builtin/#int">int</a>) (event <a href="#Event">Event</a>)</pre>
+    <p>
+     Poll the consumer for messages or events.
+    </p>
+    <h3 id="hdr-Will_block_for_at_most_timeoutMs_milliseconds">
+     Will block for at most timeoutMs milliseconds
+    </h3>
+    <p>
+     The following callbacks may be triggered:
+    </p>
+    <pre>Subscribe()'s rebalanceCb
+</pre>
+    <p>
+     Returns nil on timeout, else an Event
+    </p>
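+    <p>
+     A typical poll loop (sketch; c is a subscribed *Consumer) type-switches on
+the returned Event:
+    </p>
+    <pre>for run := true; run; {
+    switch ev := c.Poll(100).(type) {
+    case *kafka.Message:
+        fmt.Printf("message on %s: %s\n", ev.TopicPartition, string(ev.Value))
+    case kafka.PartitionEOF:
+        fmt.Printf("reached %v\n", ev)
+    case kafka.Error:
+        fmt.Fprintf(os.Stderr, "error: %v\n", ev)
+        run = false
+    case nil:
+        // Poll timed out; nothing to do.
+    }
+}</pre>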
+    <h3 id="Consumer.QueryWatermarkOffsets">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=10748:10863#L353">
+      QueryWatermarkOffsets
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) QueryWatermarkOffsets(topic <a href="http://golang.org/pkg/builtin/#string">string</a>, partition <a href="http://golang.org/pkg/builtin/#int32">int32</a>, timeoutMs <a href="http://golang.org/pkg/builtin/#int">int</a>) (low, high <a href="http://golang.org/pkg/builtin/#int64">int64</a>, err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
+and partition.
+    </p>
+    <h3 id="Consumer.String">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=1272:1306#L36">
+      String
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <p>
+     String returns a human readable name for a Consumer instance
+    </p>
+    <h3 id="Consumer.Subscribe">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=1518:1591#L47">
+      Subscribe
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Subscribe(topic <a href="http://golang.org/pkg/builtin/#string">string</a>, rebalanceCb <a href="#RebalanceCb">RebalanceCb</a>) <a href="http://golang.org/pkg/builtin/#error">error</a></pre>
+    <p>
+     Subscribe to a single topic.
+This replaces the current subscription.
+    </p>
+    <h3 id="Consumer.SubscribeTopics">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=1758:1846#L53">
+      SubscribeTopics
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) SubscribeTopics(topics []<a href="http://golang.org/pkg/builtin/#string">string</a>, rebalanceCb <a href="#RebalanceCb">RebalanceCb</a>) (err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     SubscribeTopics subscribes to the provided list of topics.
+This replaces the current subscription.
+    </p>
+    <h3 id="Consumer.Unassign">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=3022:3063#L97">
+      Unassign
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Unassign() (err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     Unassign the current set of partitions to consume.
+    </p>
+    <h3 id="Consumer.Unsubscribe">
+     func (*Consumer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=2451:2495#L75">
+      Unsubscribe
+     </a>
+    </h3>
+    <pre>func (c *<a href="#Consumer">Consumer</a>) Unsubscribe() (err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     Unsubscribe from the current subscription, if any.
+    </p>
+    <h2 id="Error">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/error.go?s=862:912#L19">
+      Error
+     </a>
+    </h2>
+    <pre>type Error struct {
+    <span class="comment">// contains filtered or unexported fields</span>
+}</pre>
+    <p>
+     Error provides a Kafka-specific error container
+    </p>
+    <h3 id="Error.Code">
+     func (Error)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/error.go?s=1649:1680#L57">
+      Code
+     </a>
+    </h3>
+    <pre>func (e <a href="#Error">Error</a>) Code() <a href="#ErrorCode">ErrorCode</a></pre>
+    <p>
+     Code returns the ErrorCode of an Error
+    </p>
+    <h3 id="Error.Error">
+     func (Error)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/error.go?s=1392:1421#L44">
+      Error
+     </a>
+    </h3>
+    <pre>func (e <a href="#Error">Error</a>) Error() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <p>
+     Error returns a human readable representation of an Error.
+Same as Error.String().
+    </p>
+    <h3 id="Error.String">
+     func (Error)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/error.go?s=1508:1538#L49">
+      String
+     </a>
+    </h3>
+    <pre>func (e <a href="#Error">Error</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <p>
+     String returns a human readable representation of an Error
+    </p>
+    <h2 id="ErrorCode">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go?s=328:346#L1">
+      ErrorCode
+     </a>
+    </h2>
+    <pre>type ErrorCode <a href="http://golang.org/pkg/builtin/#int">int</a></pre>
+    <p>
+     ErrorCode is the integer representation of local and broker error codes
+    </p>
+    <pre>const (
+    <span class="comment">// ErrBadMsg Local: Bad message format</span>
+    <span id="ErrBadMsg">ErrBadMsg</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__BAD_MSG">RD_KAFKA_RESP_ERR__BAD_MSG</a>)
+    <span class="comment">// ErrBadCompression Local: Invalid compressed data</span>
+    <span id="ErrBadCompression">ErrBadCompression</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__BAD_COMPRESSION">RD_KAFKA_RESP_ERR__BAD_COMPRESSION</a>)
+    <span class="comment">// ErrDestroy Local: Broker handle destroyed</span>
+    <span id="ErrDestroy">ErrDestroy</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__DESTROY">RD_KAFKA_RESP_ERR__DESTROY</a>)
+    <span class="comment">// ErrFail Local: Communication failure with broker</span>
+    <span id="ErrFail">ErrFail</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__FAIL">RD_KAFKA_RESP_ERR__FAIL</a>)
+    <span class="comment">// ErrTransport Local: Broker transport failure</span>
+    <span id="ErrTransport">ErrTransport</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__TRANSPORT">RD_KAFKA_RESP_ERR__TRANSPORT</a>)
+    <span class="comment">// ErrCritSysResource Local: Critical system resource failure</span>
+    <span id="ErrCritSysResource">ErrCritSysResource</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE">RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE</a>)
+    <span class="comment">// ErrResolve Local: Host resolution failure</span>
+    <span id="ErrResolve">ErrResolve</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__RESOLVE">RD_KAFKA_RESP_ERR__RESOLVE</a>)
+    <span class="comment">// ErrMsgTimedOut Local: Message timed out</span>
+    <span id="ErrMsgTimedOut">ErrMsgTimedOut</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__MSG_TIMED_OUT">RD_KAFKA_RESP_ERR__MSG_TIMED_OUT</a>)
+    <span class="comment">// ErrPartitionEOF Broker: No more messages</span>
+    <span id="ErrPartitionEOF">ErrPartitionEOF</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__PARTITION_EOF">RD_KAFKA_RESP_ERR__PARTITION_EOF</a>)
+    <span class="comment">// ErrUnknownPartition Local: Unknown partition</span>
+    <span id="ErrUnknownPartition">ErrUnknownPartition</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION">RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION</a>)
+    <span class="comment">// ErrFs Local: File or filesystem error</span>
+    <span id="ErrFs">ErrFs</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__FS">RD_KAFKA_RESP_ERR__FS</a>)
+    <span class="comment">// ErrUnknownTopic Local: Unknown topic</span>
+    <span id="ErrUnknownTopic">ErrUnknownTopic</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC">RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC</a>)
+    <span class="comment">// ErrAllBrokersDown Local: All broker connections are down</span>
+    <span id="ErrAllBrokersDown">ErrAllBrokersDown</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN">RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN</a>)
+    <span class="comment">// ErrInvalidArg Local: Invalid argument or configuration</span>
+    <span id="ErrInvalidArg">ErrInvalidArg</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__INVALID_ARG">RD_KAFKA_RESP_ERR__INVALID_ARG</a>)
+    <span class="comment">// ErrTimedOut Local: Timed out</span>
+    <span id="ErrTimedOut">ErrTimedOut</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__TIMED_OUT">RD_KAFKA_RESP_ERR__TIMED_OUT</a>)
+    <span class="comment">// ErrQueueFull Local: Queue full</span>
+    <span id="ErrQueueFull">ErrQueueFull</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__QUEUE_FULL">RD_KAFKA_RESP_ERR__QUEUE_FULL</a>)
+    <span class="comment">// ErrIsrInsuff Local: ISR count insufficient</span>
+    <span id="ErrIsrInsuff">ErrIsrInsuff</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__ISR_INSUFF">RD_KAFKA_RESP_ERR__ISR_INSUFF</a>)
+    <span class="comment">// ErrNodeUpdate Local: Broker node update</span>
+    <span id="ErrNodeUpdate">ErrNodeUpdate</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__NODE_UPDATE">RD_KAFKA_RESP_ERR__NODE_UPDATE</a>)
+    <span class="comment">// ErrSsl Local: SSL error</span>
+    <span id="ErrSsl">ErrSsl</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__SSL">RD_KAFKA_RESP_ERR__SSL</a>)
+    <span class="comment">// ErrWaitCoord Local: Waiting for coordinator</span>
+    <span id="ErrWaitCoord">ErrWaitCoord</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__WAIT_COORD">RD_KAFKA_RESP_ERR__WAIT_COORD</a>)
+    <span class="comment">// ErrUnknownGroup Local: Unknown group</span>
+    <span id="ErrUnknownGroup">ErrUnknownGroup</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__UNKNOWN_GROUP">RD_KAFKA_RESP_ERR__UNKNOWN_GROUP</a>)
+    <span class="comment">// ErrInProgress Local: Operation in progress</span>
+    <span id="ErrInProgress">ErrInProgress</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__IN_PROGRESS">RD_KAFKA_RESP_ERR__IN_PROGRESS</a>)
+    <span class="comment">// ErrPrevInProgress Local: Previous operation in progress</span>
+    <span id="ErrPrevInProgress">ErrPrevInProgress</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS">RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS</a>)
+    <span class="comment">// ErrExistingSubscription Local: Existing subscription</span>
+    <span id="ErrExistingSubscription">ErrExistingSubscription</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION">RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION</a>)
+    <span class="comment">// ErrAssignPartitions Local: Assign partitions</span>
+    <span id="ErrAssignPartitions">ErrAssignPartitions</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS">RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS</a>)
+    <span class="comment">// ErrRevokePartitions Local: Revoke partitions</span>
+    <span id="ErrRevokePartitions">ErrRevokePartitions</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS">RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS</a>)
+    <span class="comment">// ErrConflict Local: Conflicting use</span>
+    <span id="ErrConflict">ErrConflict</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__CONFLICT">RD_KAFKA_RESP_ERR__CONFLICT</a>)
+    <span class="comment">// ErrState Local: Erroneous state</span>
+    <span id="ErrState">ErrState</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__STATE">RD_KAFKA_RESP_ERR__STATE</a>)
+    <span class="comment">// ErrUnknownProtocol Local: Unknown protocol</span>
+    <span id="ErrUnknownProtocol">ErrUnknownProtocol</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL">RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL</a>)
+    <span class="comment">// ErrNotImplemented Local: Not implemented</span>
+    <span id="ErrNotImplemented">ErrNotImplemented</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED">RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED</a>)
+    <span class="comment">// ErrAuthentication Local: Authentication failure</span>
+    <span id="ErrAuthentication">ErrAuthentication</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__AUTHENTICATION">RD_KAFKA_RESP_ERR__AUTHENTICATION</a>)
+    <span class="comment">// ErrNoOffset Local: No offset stored</span>
+    <span id="ErrNoOffset">ErrNoOffset</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__NO_OFFSET">RD_KAFKA_RESP_ERR__NO_OFFSET</a>)
+    <span class="comment">// ErrOutdated Local: Outdated</span>
+    <span id="ErrOutdated">ErrOutdated</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__OUTDATED">RD_KAFKA_RESP_ERR__OUTDATED</a>)
+    <span class="comment">// ErrTimedOutQueue Local: Timed out in queue</span>
+    <span id="ErrTimedOutQueue">ErrTimedOutQueue</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE">RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE</a>)
+    <span class="comment">// ErrUnknown Unknown broker error</span>
+    <span id="ErrUnknown">ErrUnknown</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_UNKNOWN">RD_KAFKA_RESP_ERR_UNKNOWN</a>)
+    <span class="comment">// ErrNoError Success</span>
+    <span id="ErrNoError">ErrNoError</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_NO_ERROR">RD_KAFKA_RESP_ERR_NO_ERROR</a>)
+    <span class="comment">// ErrOffsetOutOfRange Broker: Offset out of range</span>
+    <span id="ErrOffsetOutOfRange">ErrOffsetOutOfRange</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE">RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE</a>)
+    <span class="comment">// ErrInvalidMsg Broker: Invalid message</span>
+    <span id="ErrInvalidMsg">ErrInvalidMsg</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_MSG">RD_KAFKA_RESP_ERR_INVALID_MSG</a>)
+    <span class="comment">// ErrUnknownTopicOrPart Broker: Unknown topic or partition</span>
+    <span id="ErrUnknownTopicOrPart">ErrUnknownTopicOrPart</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART">RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART</a>)
+    <span class="comment">// ErrInvalidMsgSize Broker: Invalid message size</span>
+    <span id="ErrInvalidMsgSize">ErrInvalidMsgSize</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE">RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE</a>)
+    <span class="comment">// ErrLeaderNotAvailable Broker: Leader not available</span>
+    <span id="ErrLeaderNotAvailable">ErrLeaderNotAvailable</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE">RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE</a>)
+    <span class="comment">// ErrNotLeaderForPartition Broker: Not leader for partition</span>
+    <span id="ErrNotLeaderForPartition">ErrNotLeaderForPartition</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION">RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION</a>)
+    <span class="comment">// ErrRequestTimedOut Broker: Request timed out</span>
+    <span id="ErrRequestTimedOut">ErrRequestTimedOut</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT">RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT</a>)
+    <span class="comment">// ErrBrokerNotAvailable Broker: Broker not available</span>
+    <span id="ErrBrokerNotAvailable">ErrBrokerNotAvailable</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE">RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE</a>)
+    <span class="comment">// ErrReplicaNotAvailable Broker: Replica not available</span>
+    <span id="ErrReplicaNotAvailable">ErrReplicaNotAvailable</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE">RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE</a>)
+    <span class="comment">// ErrMsgSizeTooLarge Broker: Message size too large</span>
+    <span id="ErrMsgSizeTooLarge">ErrMsgSizeTooLarge</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE">RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE</a>)
+    <span class="comment">// ErrStaleCtrlEpoch Broker: StaleControllerEpochCode</span>
+    <span id="ErrStaleCtrlEpoch">ErrStaleCtrlEpoch</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH">RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH</a>)
+    <span class="comment">// ErrOffsetMetadataTooLarge Broker: Offset metadata string too large</span>
+    <span id="ErrOffsetMetadataTooLarge">ErrOffsetMetadataTooLarge</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE">RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE</a>)
+    <span class="comment">// ErrNetworkException Broker: Broker disconnected before response received</span>
+    <span id="ErrNetworkException">ErrNetworkException</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION">RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION</a>)
+    <span class="comment">// ErrGroupLoadInProgress Broker: Group coordinator load in progress</span>
+    <span id="ErrGroupLoadInProgress">ErrGroupLoadInProgress</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS">RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS</a>)
+    <span class="comment">// ErrGroupCoordinatorNotAvailable Broker: Group coordinator not available</span>
+    <span id="ErrGroupCoordinatorNotAvailable">ErrGroupCoordinatorNotAvailable</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE">RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE</a>)
+    <span class="comment">// ErrNotCoordinatorForGroup Broker: Not coordinator for group</span>
+    <span id="ErrNotCoordinatorForGroup">ErrNotCoordinatorForGroup</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP">RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP</a>)
+    <span class="comment">// ErrTopicException Broker: Invalid topic</span>
+    <span id="ErrTopicException">ErrTopicException</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION">RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION</a>)
+    <span class="comment">// ErrRecordListTooLarge Broker: Message batch larger than configured server segment size</span>
+    <span id="ErrRecordListTooLarge">ErrRecordListTooLarge</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE">RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE</a>)
+    <span class="comment">// ErrNotEnoughReplicas Broker: Not enough in-sync replicas</span>
+    <span id="ErrNotEnoughReplicas">ErrNotEnoughReplicas</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS">RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS</a>)
+    <span class="comment">// ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas</span>
+    <span id="ErrNotEnoughReplicasAfterAppend">ErrNotEnoughReplicasAfterAppend</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND">RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND</a>)
+    <span class="comment">// ErrInvalidRequiredAcks Broker: Invalid required acks value</span>
+    <span id="ErrInvalidRequiredAcks">ErrInvalidRequiredAcks</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS">RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS</a>)
+    <span class="comment">// ErrIllegalGeneration Broker: Specified group generation id is not valid</span>
+    <span id="ErrIllegalGeneration">ErrIllegalGeneration</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION">RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION</a>)
+    <span class="comment">// ErrInconsistentGroupProtocol Broker: Inconsistent group protocol</span>
+    <span id="ErrInconsistentGroupProtocol">ErrInconsistentGroupProtocol</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL">RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL</a>)
+    <span class="comment">// ErrInvalidGroupID Broker: Invalid group.id</span>
+    <span id="ErrInvalidGroupID">ErrInvalidGroupID</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_GROUP_ID">RD_KAFKA_RESP_ERR_INVALID_GROUP_ID</a>)
+    <span class="comment">// ErrUnknownMemberID Broker: Unknown member</span>
+    <span id="ErrUnknownMemberID">ErrUnknownMemberID</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID">RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID</a>)
+    <span class="comment">// ErrInvalidSessionTimeout Broker: Invalid session timeout</span>
+    <span id="ErrInvalidSessionTimeout">ErrInvalidSessionTimeout</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT">RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT</a>)
+    <span class="comment">// ErrRebalanceInProgress Broker: Group rebalance in progress</span>
+    <span id="ErrRebalanceInProgress">ErrRebalanceInProgress</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS">RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS</a>)
+    <span class="comment">// ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid</span>
+    <span id="ErrInvalidCommitOffsetSize">ErrInvalidCommitOffsetSize</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE">RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE</a>)
+    <span class="comment">// ErrTopicAuthorizationFailed Broker: Topic authorization failed</span>
+    <span id="ErrTopicAuthorizationFailed">ErrTopicAuthorizationFailed</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED">RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED</a>)
+    <span class="comment">// ErrGroupAuthorizationFailed Broker: Group authorization failed</span>
+    <span id="ErrGroupAuthorizationFailed">ErrGroupAuthorizationFailed</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED">RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED</a>)
+    <span class="comment">// ErrClusterAuthorizationFailed Broker: Cluster authorization failed</span>
+    <span id="ErrClusterAuthorizationFailed">ErrClusterAuthorizationFailed</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED">RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED</a>)
+    <span class="comment">// ErrInvalidTimestamp Broker: Invalid timestamp</span>
+    <span id="ErrInvalidTimestamp">ErrInvalidTimestamp</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP">RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP</a>)
+    <span class="comment">// ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism</span>
+    <span id="ErrUnsupportedSaslMechanism">ErrUnsupportedSaslMechanism</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM">RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM</a>)
+    <span class="comment">// ErrIllegalSaslState Broker: Request not valid in current SASL state</span>
+    <span id="ErrIllegalSaslState">ErrIllegalSaslState</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE">RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE</a>)
+    <span class="comment">// ErrUnsupportedVersion Broker: API version not supported</span>
+    <span id="ErrUnsupportedVersion">ErrUnsupportedVersion</span> <a href="#ErrorCode">ErrorCode</a> = <a href="#ErrorCode">ErrorCode</a>(<a href="http://golang.org/pkg/C/">C</a>.<a href="http://golang.org/pkg/C/#RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION">RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION</a>)
+)</pre>
+    <h3 id="ErrorCode.String">
+     func (ErrorCode)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go?s=415:449#L4">
+      String
+     </a>
+    </h3>
+    <pre>func (c <a href="#ErrorCode">ErrorCode</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <p>
+     String returns a human readable representation of an error code
+    </p>
+    <h2 id="Event">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=1412:1517#L41">
+      Event
+     </a>
+    </h2>
+    <pre>type Event interface {
+    <span class="comment">// String returns a human-readable representation of the event</span>
+    String() <a href="http://golang.org/pkg/builtin/#string">string</a>
+}</pre>
+    <p>
+     Event generic interface
+    </p>
+    <h2 id="Handle">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/handle.go?s=822:868#L23">
+      Handle
+     </a>
+    </h2>
+    <pre>type Handle interface {
+    <span class="comment">// contains filtered or unexported methods</span>
+}</pre>
+    <p>
+     Handle represents a generic client handle containing common parts for
+both Producer and Consumer.
+    </p>
+    <h2 id="Message">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/message.go?s=2465:2649#L76">
+      Message
+     </a>
+    </h2>
+    <pre>type Message struct {
+    TopicPartition <a href="#TopicPartition">TopicPartition</a>
+    Value          []<a href="http://golang.org/pkg/builtin/#byte">byte</a>
+    Key            []<a href="http://golang.org/pkg/builtin/#byte">byte</a>
+    Timestamp      <a href="http://golang.org/pkg/time/">time</a>.<a href="http://golang.org/pkg/time/#Time">Time</a>
+    TimestampType  <a href="#TimestampType">TimestampType</a>
+    Opaque         interface{}
+}</pre>
+    <p>
+     Message represents a Kafka message
+    </p>
+    <h3 id="Message.String">
+     func (*Message)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/message.go?s=2755:2788#L87">
+      String
+     </a>
+    </h3>
+    <pre>func (m *<a href="#Message">Message</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <p>
+     String returns a human readable representation of a Message.
+Key and payload are not represented.
+    </p>
+    <h2 id="Metadata">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go?s=1723:1842#L60">
+      Metadata
+     </a>
+    </h2>
+    <pre>type Metadata struct {
+    Brokers []<a href="#BrokerMetadata">BrokerMetadata</a>
+    Topics  map[<a href="http://golang.org/pkg/builtin/#string">string</a>]<a href="#TopicMetadata">TopicMetadata</a>
+
+    OriginatingBroker <a href="#BrokerMetadata">BrokerMetadata</a>
+}</pre>
+    <p>
+     Metadata contains broker and topic metadata for all (matching) topics
+    </p>
+    <h2 id="Offset">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=6428:6445#L149">
+      Offset
+     </a>
+    </h2>
+    <pre>type Offset <a href="http://golang.org/pkg/builtin/#int64">int64</a></pre>
+    <p>
+     Offset type (int64) with support for canonical names
+    </p>
+    <h3 id="NewOffset">
+     func
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=7384:7434#L192">
+      NewOffset
+     </a>
+    </h3>
+    <pre>func NewOffset(offset interface{}) (<a href="#Offset">Offset</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     NewOffset creates a new Offset using the provided logical string, or an
+absolute int64 offset value.
+Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
+    </p>
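+    <p>
+     For example (fragment; errors elided for brevity):
+    </p>
+    <pre>o, _ := kafka.NewOffset("beginning")  // logical offset by name
+fmt.Println(o)                        // canonical name via Offset.String()
+o, _ = kafka.NewOffset(int64(12345))  // absolute offset value
+</pre>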
+    <h3 id="OffsetTail">
+     func
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=8170:8215#L231">
+      OffsetTail
+     </a>
+    </h3>
+    <pre>func OffsetTail(relativeOffset <a href="#Offset">Offset</a>) <a href="#Offset">Offset</a></pre>
+    <p>
+     OffsetTail returns the logical offset relativeOffset from the current end of the partition
+    </p>
+    <h3 id="Offset.Set">
+     func (Offset)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=7064:7109#L179">
+      Set
+     </a>
+    </h3>
+    <pre>func (o <a href="#Offset">Offset</a>) Set(offset interface{}) <a href="http://golang.org/pkg/builtin/#error">error</a></pre>
+    <p>
+     Set sets the offset value; see NewOffset().
+    </p>
+    <h3 id="Offset.String">
+     func (Offset)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=6776:6807#L163">
+      String
+     </a>
+    </h3>
+    <pre>func (o <a href="#Offset">Offset</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <h2 id="OffsetsCommitted">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=2266:2339#L74">
+      OffsetsCommitted
+     </a>
+    </h2>
+    <pre>type OffsetsCommitted struct {
+    Error   <a href="http://golang.org/pkg/builtin/#error">error</a>
+    Offsets []<a href="#TopicPartition">TopicPartition</a>
+}</pre>
+    <p>
+     OffsetsCommitted reports committed offsets
+    </p>
+    <h3 id="OffsetsCommitted.String">
+     func (OffsetsCommitted)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=2341:2382#L79">
+      String
+     </a>
+    </h3>
+    <pre>func (o <a href="#OffsetsCommitted">OffsetsCommitted</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <h2 id="PartitionEOF">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=2091:2123#L67">
+      PartitionEOF
+     </a>
+    </h2>
+    <pre>type PartitionEOF <a href="#TopicPartition">TopicPartition</a></pre>
+    <p>
+     PartitionEOF consumer reached end of partition
+    </p>
+    <h3 id="PartitionEOF.String">
+     func (PartitionEOF)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=2125:2162#L69">
+      String
+     </a>
+    </h3>
+    <pre>func (p <a href="#PartitionEOF">PartitionEOF</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <h2 id="PartitionMetadata">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go?s=1386:1503#L44">
+      PartitionMetadata
+     </a>
+    </h2>
+    <pre>type PartitionMetadata struct {
+    ID       <a href="http://golang.org/pkg/builtin/#int32">int32</a>
+    Error    <a href="#Error">Error</a>
+    Leader   <a href="http://golang.org/pkg/builtin/#int32">int32</a>
+    Replicas []<a href="http://golang.org/pkg/builtin/#int32">int32</a>
+    Isrs     []<a href="http://golang.org/pkg/builtin/#int32">int32</a>
+}</pre>
+    <p>
+     PartitionMetadata contains per-partition metadata
+    </p>
+    <h2 id="Producer">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=1101:1270#L30">
+      Producer
+     </a>
+    </h2>
+    <pre>type Producer struct {
+    <span class="comment">// contains filtered or unexported fields</span>
+}</pre>
+    <p>
+     Producer implements a High-level Apache Kafka Producer instance
+    </p>
+    <h3 id="NewProducer">
+     func
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=6249:6301#L203">
+      NewProducer
+     </a>
+    </h3>
+    <pre>func NewProducer(conf *<a href="#ConfigMap">ConfigMap</a>) (*<a href="#Producer">Producer</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     NewProducer creates a new high-level Producer instance.
+    </p>
+    <p>
+     conf is a *ConfigMap with standard librdkafka configuration properties; see the CONFIGURATION.md link under ConfigMap above.
+    </p>
+    <p>
+     Supported special configuration properties:
+    </p>
+    <pre>go.batch.producer (bool, false) - Enable batch producer (experimental for increased performance).
+                                  These batches do not relate to Kafka message batches in any way.
+go.delivery.reports (bool, true) - Forward per-message delivery reports to the
+                                   Events() channel.
+go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
+</pre>
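+    <p>
+     A minimal creation sketch (placeholder broker address; the special go.*
+properties keep their defaults):
+    </p>
+    <pre>p, err := kafka.NewProducer(&amp;kafka.ConfigMap{
+    "bootstrap.servers": "localhost:9092", // placeholder
+})
+if err != nil {
+    panic(err) // sketch only
+}
+defer p.Close()</pre>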
+    <h3 id="Producer.Close">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=5283:5309#L174">
+      Close
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) Close()</pre>
+    <p>
+     Close a Producer instance.
+The Producer object or its channels are no longer usable after this call.
+    </p>
+    <h3 id="Producer.Events">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=4035:4073#L134">
+      Events
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) Events() chan <a href="#Event">Event</a></pre>
+    <p>
+     Events returns the Events channel (read)
+    </p>
+    <h3 id="Producer.Flush">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=4779:4822#L154">
+      Flush
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) Flush(timeoutMs <a href="http://golang.org/pkg/builtin/#int">int</a>) <a href="http://golang.org/pkg/builtin/#int">int</a></pre>
+    <p>
+     Flush and wait for outstanding messages and requests to complete delivery.
+Includes messages on ProduceChannel.
+Runs until the outstanding count reaches zero or timeoutMs elapses.
+Returns the number of outstanding events still un-flushed.
+    </p>
+    <h3 id="Producer.GetMetadata">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=9852:9947#L354">
+      GetMetadata
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) GetMetadata(topic *<a href="http://golang.org/pkg/builtin/#string">string</a>, allTopics <a href="http://golang.org/pkg/builtin/#bool">bool</a>, timeoutMs <a href="http://golang.org/pkg/builtin/#int">int</a>) (*<a href="#Metadata">Metadata</a>, <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     GetMetadata queries broker for cluster and topic metadata.
+If topic is non-nil only information about that topic is returned, else if
+allTopics is false only information about locally used topics is returned,
+else information about all topics is returned.
+    </p>
+    <h3 id="Producer.Len">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=4429:4457#L146">
+      Len
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) Len() <a href="http://golang.org/pkg/builtin/#int">int</a></pre>
+    <p>
+     Len returns the number of messages and requests waiting to be transmitted to the broker
+as well as delivery reports queued for the application.
+Includes messages on ProduceChannel.
+    </p>
+    <h3 id="Producer.Produce">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=3205:3276#L109">
+      Produce
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) Produce(msg *<a href="#Message">Message</a>, deliveryChan chan <a href="#Event">Event</a>) <a href="http://golang.org/pkg/builtin/#error">error</a></pre>
+    <p>
+     Produce single message.
+This is an asynchronous call that enqueues the message on the internal
+transmit queue, thus returning immediately.
+The delivery report will be sent on the provided deliveryChan if specified,
+or on the Producer object's Events() channel if not.
+Returns an error if message could not be enqueued.
+    </p>
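+    <p>
+     For example (sketch; p is a *Producer, topic is a string variable, and the
+payload is illustrative), a dedicated delivery channel makes the report easy to
+correlate with the message:
+    </p>
+    <pre>deliveryChan := make(chan kafka.Event, 1)
+err := p.Produce(&amp;kafka.Message{
+    TopicPartition: kafka.TopicPartition{Topic: &amp;topic, Partition: kafka.PartitionAny},
+    Value:          []byte("hello"), // illustrative payload
+}, deliveryChan)
+if err == nil {
+    m := (&lt;-deliveryChan).(*kafka.Message) // the delivery report
+    if m.TopicPartition.Error != nil {
+        fmt.Fprintf(os.Stderr, "delivery failed: %v\n", m.TopicPartition.Error)
+    }
+}</pre>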
+    <h3 id="Producer.ProduceChannel">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=4159:4208#L139">
+      ProduceChannel
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) ProduceChannel() chan *<a href="#Message">Message</a></pre>
+    <p>
+     ProduceChannel returns the produce *Message channel (write)
+    </p>
+    <h3 id="Producer.QueryWatermarkOffsets">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=10110:10225#L360">
+      QueryWatermarkOffsets
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) QueryWatermarkOffsets(topic <a href="http://golang.org/pkg/builtin/#string">string</a>, partition <a href="http://golang.org/pkg/builtin/#int32">int32</a>, timeoutMs <a href="http://golang.org/pkg/builtin/#int">int</a>) (low, high <a href="http://golang.org/pkg/builtin/#int64">int64</a>, err <a href="http://golang.org/pkg/builtin/#error">error</a>)</pre>
+    <p>
+     QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
+and partition.
+    </p>
+    <h3 id="Producer.String">
+     func (*Producer)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/producer.go?s=1336:1370#L40">
+      String
+     </a>
+    </h3>
+    <pre>func (p *<a href="#Producer">Producer</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <p>
+     String returns a human readable name for a Producer instance
+    </p>
+    <h2 id="RebalanceCb">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go?s=854:899#L22">
+      RebalanceCb
+     </a>
+    </h2>
+    <pre>type RebalanceCb func(*<a href="#Consumer">Consumer</a>, <a href="#Event">Event</a>) <a href="http://golang.org/pkg/builtin/#error">error</a></pre>
+    <p>
+     RebalanceCb provides a per-Subscribe*() rebalance event callback.
+The passed Event will be either AssignedPartitions or RevokedPartitions
+    </p>
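+    <p>
+     A callback sketch (assumes go.application.rebalance.enable is true; the log
+lines are illustrative):
+    </p>
+    <pre>var onRebalance kafka.RebalanceCb = func(c *kafka.Consumer, ev kafka.Event) error {
+    switch e := ev.(type) {
+    case kafka.AssignedPartitions:
+        fmt.Printf("assigned: %v\n", e.Partitions)
+        return c.Assign(e.Partitions)
+    case kafka.RevokedPartitions:
+        fmt.Printf("revoked: %v\n", e.Partitions)
+        return c.Unassign()
+    }
+    return nil
+}
+// e.g. err := c.SubscribeTopics([]string{"my-topic"}, onRebalance)</pre>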
+    <h2 id="RevokedPartitions">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=1870:1932#L58">
+      RevokedPartitions
+     </a>
+    </h2>
+    <pre>type RevokedPartitions struct {
+    Partitions []<a href="#TopicPartition">TopicPartition</a>
+}</pre>
+    <p>
+     RevokedPartitions consumer group rebalance event: revoked partition set
+    </p>
+    <h3 id="RevokedPartitions.String">
+     func (RevokedPartitions)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/event.go?s=1934:1976#L62">
+      String
+     </a>
+    </h3>
+    <pre>func (e <a href="#RevokedPartitions">RevokedPartitions</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <h2 id="TimestampType">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/message.go?s=1671:1693#L51">
+      TimestampType
+     </a>
+    </h2>
+    <pre>type TimestampType <a href="http://golang.org/pkg/builtin/#int">int</a></pre>
+    <p>
+     TimestampType is the Message timestamp type or source
+    </p>
+    <h3 id="TimestampType.String">
+     func (TimestampType)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/message.go?s=2187:2225#L62">
+      String
+     </a>
+    </h3>
+    <pre>func (t <a href="#TimestampType">TimestampType</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <h2 id="TopicMetadata">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go?s=1550:1648#L53">
+      TopicMetadata
+     </a>
+    </h2>
+    <pre>type TopicMetadata struct {
+    Topic      <a href="http://golang.org/pkg/builtin/#string">string</a>
+    Partitions []<a href="#PartitionMetadata">PartitionMetadata</a>
+    Error      <a href="#Error">Error</a>
+}</pre>
+    <p>
+     TopicMetadata contains per-topic metadata
+    </p>
+    <h2 id="TopicPartition">
+     type
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=8377:8478#L236">
+      TopicPartition
+     </a>
+    </h2>
+    <pre>type TopicPartition struct {
+    Topic     *<a href="http://golang.org/pkg/builtin/#string">string</a>
+    Partition <a href="http://golang.org/pkg/builtin/#int32">int32</a>
+    Offset    <a href="#Offset">Offset</a>
+    Error     <a href="http://golang.org/pkg/builtin/#error">error</a>
+}</pre>
+    <p>
+     TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.
+    </p>
+    <h3 id="TopicPartition.String">
+     func (TopicPartition)
+     <a href="http://golang.org/src/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go?s=8480:8519#L243">
+      String
+     </a>
+    </h3>
+    <pre>func (p <a href="#TopicPartition">TopicPartition</a>) String() <a href="http://golang.org/pkg/builtin/#string">string</a></pre>
+    <div id="footer">
+     Build version go1.6.
+     <br>
+      Except as
+      <a href="https://developers.google.com/site-policies#restrictions">
+       noted
+      </a>
+      ,
+the content of this page is licensed under the
+Creative Commons Attribution 3.0 License,
+and code is licensed under a
+      <a href="http://golang.org/LICENSE">
+       BSD license
+      </a>
+      .
+      <br>
+       <a href="http://golang.org/doc/tos.html">
+        Terms of Service
+       </a>
+       |
+       <a href="http://www.google.com/intl/en/policies/privacy/">
+        Privacy Policy
+       </a>
+    </div>
+   </div>
+   <!-- .container -->
+  </div>
+  <!-- #page -->
+  <!-- TODO(adonovan): load these from <head> using "defer" attribute? -->
+  <script src="http://golang.org/lib/godoc/jquery.js" type="text/javascript">
+  </script>
+  <script src="http://golang.org/lib/godoc/jquery.treeview.js" type="text/javascript">
+  </script>
+  <script src="http://golang.org/lib/godoc/jquery.treeview.edit.js" type="text/javascript">
+  </script>
+  <script src="http://golang.org/lib/godoc/godocs.js" type="text/javascript">
+  </script>
+ </body>
+</html>
+
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go
new file mode 100644
index 0000000..c14c1f6
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go
@@ -0,0 +1,7 @@
+// +build !static
+// +build !static_all
+
+package kafka
+
+// #cgo pkg-config: rdkafka
+import "C"
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go
new file mode 100644
index 0000000..3c8799c
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static.go
@@ -0,0 +1,7 @@
+// +build static
+// +build !static_all
+
+package kafka
+
+// #cgo pkg-config: rdkafka-static
+import "C"
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go
new file mode 100644
index 0000000..8afb8c9
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_static_all.go
@@ -0,0 +1,8 @@
+// +build !static
+// +build static_all
+
+package kafka
+
+// #cgo pkg-config: rdkafka-static
+// #cgo LDFLAGS: -static
+import "C"
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go
new file mode 100644
index 0000000..5866a47
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go
@@ -0,0 +1,243 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+*/
+import "C"
+
+// ConfigValue supports the following types:
+//  bool, int, string, any type with the standard String() interface (fmt.Stringer)
+type ConfigValue interface{}
+
+// ConfigMap is a map containing standard librdkafka configuration properties as documented in:
+// https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md
+//
+// The special property "default.topic.config" (optional) is a ConfigMap containing default topic
+// configuration properties.
+type ConfigMap map[string]ConfigValue
+
+// SetKey sets configuration property key to value.
+// For user convenience a key prefixed with {topic}. will be
+// set on the "default.topic.config" sub-map.
+func (m ConfigMap) SetKey(key string, value ConfigValue) error {
+	if strings.HasPrefix(key, "{topic}.") {
+		_, found := m["default.topic.config"]
+		if !found {
+			m["default.topic.config"] = ConfigMap{}
+		}
+		m["default.topic.config"].(ConfigMap)[strings.TrimPrefix(key, "{topic}.")] = value
+	} else {
+		m[key] = value
+	}
+
+	return nil
+}
+
+// Set implements flag.Set (command line argument parser) as a convenience
+// for `-X key=value` config.
+func (m ConfigMap) Set(kv string) error {
+	i := strings.Index(kv, "=")
+	if i == -1 {
+		return Error{ErrInvalidArg, "Expected key=value"}
+	}
+
+	k := kv[:i]
+	v := kv[i+1:]
+
+	return m.SetKey(k, v)
+}
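For illustration (not part of this diff), a minimal sketch of how `SetKey` and `Set` are typically used together; the broker address and property values below are placeholders:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	conf := kafka.ConfigMap{"bootstrap.servers": "localhost:9092"} // placeholder broker

	// Keys prefixed with "{topic}." are routed into the
	// "default.topic.config" sub-ConfigMap.
	_ = conf.SetKey("{topic}.auto.offset.reset", "earliest")

	// Set parses "key=value" pairs, e.g. from a -X command-line flag.
	if err := conf.Set("session.timeout.ms=6000"); err != nil {
		fmt.Println(err)
	}
}
```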
+
+func value2string(v ConfigValue) (ret string, errstr string) {
+
+	switch x := v.(type) {
+	case bool:
+		if x {
+			ret = "true"
+		} else {
+			ret = "false"
+		}
+	case int:
+		ret = fmt.Sprintf("%d", x)
+	case string:
+		ret = x
+	case fmt.Stringer:
+		ret = x.String()
+	default:
+		return "", fmt.Sprintf("Invalid value type %T", v)
+	}
+
+	return ret, ""
+}
+
+// rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t
+// into a common interface.
+type rdkAnyconf interface {
+	set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t
+}
+
+func anyconfSet(anyconf rdkAnyconf, key string, val ConfigValue) (err error) {
+	value, errstr := value2string(val)
+	if errstr != "" {
+		return Error{ErrInvalidArg, fmt.Sprintf("%s for key %s (expected string,bool,int,ConfigMap)", errstr, key)}
+	}
+	cKey := C.CString(key)
+	cVal := C.CString(value)
+	cErrstr := (*C.char)(C.malloc(C.size_t(128)))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	if anyconf.set(cKey, cVal, cErrstr, 128) != C.RD_KAFKA_CONF_OK {
+		C.free(unsafe.Pointer(cKey))
+		C.free(unsafe.Pointer(cVal))
+		return newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+	}
+
+	return nil
+}
+
+// we need these typedefs to work around a crash in golint
+// when parsing the set() methods below
+type rdkConf C.rd_kafka_conf_t
+type rdkTopicConf C.rd_kafka_topic_conf_t
+
+func (cConf *rdkConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
+	return C.rd_kafka_conf_set((*C.rd_kafka_conf_t)(cConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
+}
+
+func (ctopicConf *rdkTopicConf) set(cKey *C.char, cVal *C.char, cErrstr *C.char, errstrSize int) C.rd_kafka_conf_res_t {
+	return C.rd_kafka_topic_conf_set((*C.rd_kafka_topic_conf_t)(ctopicConf), cKey, cVal, cErrstr, C.size_t(errstrSize))
+}
+
+func configConvertAnyconf(m ConfigMap, anyconf rdkAnyconf) (err error) {
+	// set plugins first, any plugin-specific configuration depends on
+	// the plugin to have already been set
+	pluginPaths, ok := m["plugin.library.paths"]
+	if ok {
+		err = anyconfSet(anyconf, "plugin.library.paths", pluginPaths)
+		if err != nil {
+			return err
+		}
+	}
+	for k, v := range m {
+		if k == "plugin.library.paths" {
+			continue
+		}
+		switch v.(type) {
+		case ConfigMap:
+			/* Special sub-ConfigMap, only used for default.topic.config */
+
+			if k != "default.topic.config" {
+				return Error{ErrInvalidArg, fmt.Sprintf("Invalid type for key %s", k)}
+			}
+
+			var cTopicConf = C.rd_kafka_topic_conf_new()
+
+			err = configConvertAnyconf(v.(ConfigMap),
+				(*rdkTopicConf)(cTopicConf))
+			if err != nil {
+				C.rd_kafka_topic_conf_destroy(cTopicConf)
+				return err
+			}
+
+			C.rd_kafka_conf_set_default_topic_conf(
+				(*C.rd_kafka_conf_t)(anyconf.(*rdkConf)),
+				(*C.rd_kafka_topic_conf_t)((*rdkTopicConf)(cTopicConf)))
+
+		default:
+			err = anyconfSet(anyconf, k, v)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// convert ConfigMap to C rd_kafka_conf_t *
+func (m ConfigMap) convert() (cConf *C.rd_kafka_conf_t, err error) {
+	cConf = C.rd_kafka_conf_new()
+
+	err = configConvertAnyconf(m, (*rdkConf)(cConf))
+	if err != nil {
+		C.rd_kafka_conf_destroy(cConf)
+		return nil, err
+	}
+	return cConf, nil
+}
+
+// get finds key in the configmap and returns its value.
+// If the key is not found defval is returned.
+// If the key is found but the type is mismatched an error is returned.
+func (m ConfigMap) get(key string, defval ConfigValue) (ConfigValue, error) {
+	if strings.HasPrefix(key, "{topic}.") {
+		defconfCv, found := m["default.topic.config"]
+		if !found {
+			return defval, nil
+		}
+		return defconfCv.(ConfigMap).get(strings.TrimPrefix(key, "{topic}."), defval)
+	}
+
+	v, ok := m[key]
+	if !ok {
+		return defval, nil
+	}
+
+	if defval != nil && reflect.TypeOf(defval) != reflect.TypeOf(v) {
+		return nil, Error{ErrInvalidArg, fmt.Sprintf("%s expects type %T, not %T", key, defval, v)}
+	}
+
+	return v, nil
+}
+
+// extract performs a get() and if found deletes the key.
+func (m ConfigMap) extract(key string, defval ConfigValue) (ConfigValue, error) {
+
+	v, err := m.get(key, defval)
+	if err != nil {
+		return nil, err
+	}
+
+	delete(m, key)
+
+	return v, nil
+}
+
+func (m ConfigMap) clone() ConfigMap {
+	m2 := make(ConfigMap)
+	for k, v := range m {
+		m2[k] = v
+	}
+	return m2
+}
+
+// Get finds the given key in the ConfigMap and returns its value.
+// If the key is not found `defval` is returned.
+// If the key is found but the type does not match that of `defval` (unless nil)
+// an ErrInvalidArg error is returned.
+func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) {
+	return m.get(key, defval)
+}
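A hedged usage sketch of the defval-based type checking in `Get` (assuming the `kafka` import shown earlier):

```go
// channelSize returns the configured events channel size, falling back
// to 1000; Get errors if the stored value is not an int like the default.
func channelSize(conf kafka.ConfigMap) (int, error) {
	v, err := conf.Get("go.events.channel.size", 1000)
	if err != nil {
		return 0, err // stored value had a different type than the default
	}
	return v.(int), nil
}
```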
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go
new file mode 100644
index 0000000..5c42ece
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go
@@ -0,0 +1,581 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+	"math"
+	"time"
+	"unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+
+
+static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
+   return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
+}
+*/
+import "C"
+
+// RebalanceCb provides a per-Subscribe*() rebalance event callback.
+// The passed Event will be either AssignedPartitions or RevokedPartitions
+type RebalanceCb func(*Consumer, Event) error
+
+// Consumer implements a High-level Apache Kafka Consumer instance
+type Consumer struct {
+	events             chan Event
+	handle             handle
+	eventsChanEnable   bool
+	readerTermChan     chan bool
+	rebalanceCb        RebalanceCb
+	appReassigned      bool
+	appRebalanceEnable bool // config setting
+}
+
+// String returns a human readable name for a Consumer instance
+func (c *Consumer) String() string {
+	return c.handle.String()
+}
+
+// getHandle implements the Handle interface
+func (c *Consumer) gethandle() *handle {
+	return &c.handle
+}
+
+// Subscribe to a single topic
+// This replaces the current subscription
+func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error {
+	return c.SubscribeTopics([]string{topic}, rebalanceCb)
+}
+
+// SubscribeTopics subscribes to the provided list of topics.
+// This replaces the current subscription.
+func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) {
+	ctopics := C.rd_kafka_topic_partition_list_new(C.int(len(topics)))
+	defer C.rd_kafka_topic_partition_list_destroy(ctopics)
+
+	for _, topic := range topics {
+		ctopic := C.CString(topic)
+		defer C.free(unsafe.Pointer(ctopic))
+		C.rd_kafka_topic_partition_list_add(ctopics, ctopic, C.RD_KAFKA_PARTITION_UA)
+	}
+
+	e := C.rd_kafka_subscribe(c.handle.rk, ctopics)
+	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(e)
+	}
+
+	c.rebalanceCb = rebalanceCb
+	c.handle.currAppRebalanceEnable = c.rebalanceCb != nil || c.appRebalanceEnable
+
+	return nil
+}
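A sketch of a rebalance callback as it might be passed to `SubscribeTopics`; the topic name and `consumer` variable are assumptions. When a callback (or `go.application.rebalance.enable`) is in play, the application must acknowledge the rebalance by calling `Assign()`/`Unassign()` itself:

```go
rebalanceCb := func(c *kafka.Consumer, ev kafka.Event) error {
	switch e := ev.(type) {
	case kafka.AssignedPartitions:
		return c.Assign(e.Partitions) // ack the new assignment
	case kafka.RevokedPartitions:
		return c.Unassign() // ack the revocation
	}
	return nil
}
err := consumer.SubscribeTopics([]string{"example-topic"}, rebalanceCb)
if err != nil {
	// subscription failed
}
```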
+
+// Unsubscribe from the current subscription, if any.
+func (c *Consumer) Unsubscribe() (err error) {
+	C.rd_kafka_unsubscribe(c.handle.rk)
+	return nil
+}
+
+// Assign an atomic set of partitions to consume.
+// This replaces the current assignment.
+func (c *Consumer) Assign(partitions []TopicPartition) (err error) {
+	c.appReassigned = true
+
+	cparts := newCPartsFromTopicPartitions(partitions)
+	defer C.rd_kafka_topic_partition_list_destroy(cparts)
+
+	e := C.rd_kafka_assign(c.handle.rk, cparts)
+	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(e)
+	}
+
+	return nil
+}
+
+// Unassign the current set of partitions to consume.
+func (c *Consumer) Unassign() (err error) {
+	c.appReassigned = true
+
+	e := C.rd_kafka_assign(c.handle.rk, nil)
+	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(e)
+	}
+
+	return nil
+}
+
+// commit commits the specified offsets.
+// If offsets is nil the currently assigned partitions' offsets are committed.
+// This is a blocking call; the caller will need to wrap it in a goroutine
+// to get asynchronous or fire-and-forget behaviour.
+func (c *Consumer) commit(offsets []TopicPartition) (committedOffsets []TopicPartition, err error) {
+	var rkqu *C.rd_kafka_queue_t
+
+	rkqu = C.rd_kafka_queue_new(c.handle.rk)
+	defer C.rd_kafka_queue_destroy(rkqu)
+
+	var coffsets *C.rd_kafka_topic_partition_list_t
+	if offsets != nil {
+		coffsets = newCPartsFromTopicPartitions(offsets)
+		defer C.rd_kafka_topic_partition_list_destroy(coffsets)
+	}
+
+	cErr := C.rd_kafka_commit_queue(c.handle.rk, coffsets, rkqu, nil, nil)
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newError(cErr)
+	}
+
+	rkev := C.rd_kafka_queue_poll(rkqu, C.int(-1))
+	if rkev == nil {
+		// shouldn't happen
+		return nil, newError(C.RD_KAFKA_RESP_ERR__DESTROY)
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	if C.rd_kafka_event_type(rkev) != C.RD_KAFKA_EVENT_OFFSET_COMMIT {
+		panic(fmt.Sprintf("Expected OFFSET_COMMIT, got %s",
+			C.GoString(C.rd_kafka_event_name(rkev))))
+	}
+
+	cErr = C.rd_kafka_event_error(rkev)
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
+	}
+
+	cRetoffsets := C.rd_kafka_event_topic_partition_list(rkev)
+	if cRetoffsets == nil {
+		// no offsets, no error
+		return nil, nil
+	}
+	committedOffsets = newTopicPartitionsFromCparts(cRetoffsets)
+
+	return committedOffsets, nil
+}
+
+// Commit offsets for currently assigned partitions
+// This is a blocking call.
+// Returns the committed offsets on success.
+func (c *Consumer) Commit() ([]TopicPartition, error) {
+	return c.commit(nil)
+}
+
+// CommitMessage commits offset based on the provided message.
+// This is a blocking call.
+// Returns the committed offsets on success.
+func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) {
+	if m.TopicPartition.Error != nil {
+		return nil, Error{ErrInvalidArg, "Can't commit errored message"}
+	}
+	offsets := []TopicPartition{m.TopicPartition}
+	offsets[0].Offset++
+	return c.commit(offsets)
+}
+
+// CommitOffsets commits the provided list of offsets
+// This is a blocking call.
+// Returns the committed offsets on success.
+func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) {
+	return c.commit(offsets)
+}
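A minimal at-least-once consume loop built on these commit APIs; `c` is assumed to be a subscribed *kafka.Consumer and `process()` stands in for application code:

```go
for {
	msg, err := c.ReadMessage(-1) // block indefinitely
	if err != nil {
		continue // a real application would inspect the error
	}
	process(msg) // hypothetical application handler

	// Commit only after successful processing (at-least-once).
	if _, err := c.CommitMessage(msg); err != nil {
		// commit failed; the message may be redelivered after a restart
	}
}
```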
+
+// StoreOffsets stores the provided list of offsets that will be committed
+// to the offset store according to `auto.commit.interval.ms` or manual
+// offset-less Commit().
+//
+// Returns the stored offsets on success. If at least one offset couldn't be stored,
+// an error and a list of offsets are returned. Each offset can be checked for
+// specific errors via its `.Error` member.
+func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) {
+	coffsets := newCPartsFromTopicPartitions(offsets)
+	defer C.rd_kafka_topic_partition_list_destroy(coffsets)
+
+	cErr := C.rd_kafka_offsets_store(c.handle.rk, coffsets)
+
+	// coffsets might be annotated with an error
+	storedOffsets = newTopicPartitionsFromCparts(coffsets)
+
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return storedOffsets, newError(cErr)
+	}
+
+	return storedOffsets, nil
+}
+
+// Seek seeks the given topic partitions using the offset from the TopicPartition.
+//
+// If timeoutMs is not 0 the call will wait this long for the
+// seek to be performed. If the timeout is reached the internal state
+// will be unknown and this function returns ErrTimedOut.
+// If timeoutMs is 0 it will initiate the seek but return
+// immediately without any error reporting (e.g., async).
+//
+// Seek() may only be used for partitions already being consumed
+// (through Assign() or implicitly through a self-rebalanced Subscribe()).
+// To set the starting offset it is preferred to use Assign() and provide
+// a starting offset for each partition.
+//
+// Returns an error on failure or nil otherwise.
+func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error {
+	rkt := c.handle.getRkt(*partition.Topic)
+	cErr := C.rd_kafka_seek(rkt,
+		C.int32_t(partition.Partition),
+		C.int64_t(partition.Offset),
+		C.int(timeoutMs))
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(cErr)
+	}
+	return nil
+}
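For example, rewinding one already-assigned partition (the topic name and offset are placeholders):

```go
topic := "example-topic"
err := c.Seek(kafka.TopicPartition{
	Topic:     &topic,
	Partition: 0,
	Offset:    kafka.Offset(42),
}, 1000 /* timeoutMs: wait up to 1s for the seek */)
if err != nil {
	// seek failed or timed out
}
```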
+
+// Poll the consumer for messages or events.
+//
+// Will block for at most timeoutMs milliseconds
+//
+// The following callbacks may be triggered:
+//   Subscribe()'s rebalanceCb
+//
+// Returns nil on timeout, else an Event
+func (c *Consumer) Poll(timeoutMs int) (event Event) {
+	ev, _ := c.handle.eventPoll(nil, timeoutMs, 1, nil)
+	return ev
+}
+
+// Events returns the Events channel (if enabled)
+func (c *Consumer) Events() chan Event {
+	return c.events
+}
+
+// ReadMessage polls the consumer for a message.
+//
+// This is a convenience API that wraps Poll() and only returns
+// messages or errors. All other event types are discarded.
+//
+// The call will block for at most `timeout` waiting for
+// a new message or error. `timeout` may be set to -1 for
+// indefinite wait.
+//
+// Timeout is returned as (nil, err) where `err.(kafka.Error).Code() == kafka.ErrTimedOut`.
+//
+// Messages are returned as (msg, nil),
+// while general errors are returned as (nil, err),
+// and partition-specific errors are returned as (msg, err) where
+// msg.TopicPartition provides partition-specific information (such as topic, partition and offset).
+//
+// All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded.
+//
+func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) {
+
+	var absTimeout time.Time
+	var timeoutMs int
+
+	if timeout > 0 {
+		absTimeout = time.Now().Add(timeout)
+		timeoutMs = (int)(timeout.Seconds() * 1000.0)
+	} else {
+		timeoutMs = (int)(timeout)
+	}
+
+	for {
+		ev := c.Poll(timeoutMs)
+
+		switch e := ev.(type) {
+		case *Message:
+			if e.TopicPartition.Error != nil {
+				return e, e.TopicPartition.Error
+			}
+			return e, nil
+		case Error:
+			return nil, e
+		default:
+			// Ignore other event types
+		}
+
+		if timeout > 0 {
+			// Calculate remaining time
+			timeoutMs = int(math.Max(0.0, absTimeout.Sub(time.Now()).Seconds()*1000.0))
+		}
+
+		if timeoutMs == 0 && ev == nil {
+			return nil, newError(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
+		}
+
+	}
+
+}
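A sketch of distinguishing a timeout from a real error when polling with `ReadMessage` (assumes the standard `time` and `fmt` imports):

```go
msg, err := c.ReadMessage(100 * time.Millisecond)
if err != nil {
	if kerr, ok := err.(kafka.Error); ok && kerr.Code() == kafka.ErrTimedOut {
		// no message within 100ms; not fatal, poll again
	}
} else {
	fmt.Printf("%v: %s\n", msg.TopicPartition, string(msg.Value))
}
```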
+
+// Close Consumer instance.
+// The object is no longer usable after this call.
+func (c *Consumer) Close() (err error) {
+
+	if c.eventsChanEnable {
+		// Wait for consumerReader() to terminate (by closing readerTermChan)
+		close(c.readerTermChan)
+		c.handle.waitTerminated(1)
+		close(c.events)
+	}
+
+	C.rd_kafka_queue_destroy(c.handle.rkq)
+	c.handle.rkq = nil
+
+	e := C.rd_kafka_consumer_close(c.handle.rk)
+	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(e)
+	}
+
+	c.handle.cleanup()
+
+	C.rd_kafka_destroy(c.handle.rk)
+
+	return nil
+}
+
+// NewConsumer creates a new high-level Consumer instance.
+//
+// Supported special configuration properties:
+//   go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
+//                                        If set to true the app must handle the AssignedPartitions and
+//                                        RevokedPartitions events and call Assign() and Unassign()
+//                                        respectively.
+//   go.events.channel.enable (bool, false) - Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. (Experimental)
+//   go.events.channel.size (int, 1000) - Events() channel size
+//
+// WARNING: Due to the buffering nature of channels (and queues in general) the
+// use of the events channel risks receiving outdated events and
+// messages. Minimizing go.events.channel.size reduces the risk
+// and number of outdated events and messages but does not eliminate
+// the factor completely. With a channel size of 1 at most one
+// event or message may be outdated.
+func NewConsumer(conf *ConfigMap) (*Consumer, error) {
+
+	err := versionCheck()
+	if err != nil {
+		return nil, err
+	}
+
+	// before we do anything with the configuration, create a copy such that
+	// the original is not mutated.
+	confCopy := conf.clone()
+
+	groupid, _ := confCopy.get("group.id", nil)
+	if groupid == nil {
+	// without a group.id the underlying cgrp subsystem in librdkafka won't get started
+		// and without it there is no way to consume assigned partitions.
+		// So for now require the group.id, this might change in the future.
+		return nil, newErrorFromString(ErrInvalidArg, "Required property group.id not set")
+	}
+
+	c := &Consumer{}
+
+	v, err := confCopy.extract("go.application.rebalance.enable", false)
+	if err != nil {
+		return nil, err
+	}
+	c.appRebalanceEnable = v.(bool)
+
+	v, err = confCopy.extract("go.events.channel.enable", false)
+	if err != nil {
+		return nil, err
+	}
+	c.eventsChanEnable = v.(bool)
+
+	v, err = confCopy.extract("go.events.channel.size", 1000)
+	if err != nil {
+		return nil, err
+	}
+	eventsChanSize := v.(int)
+
+	cConf, err := confCopy.convert()
+	if err != nil {
+		return nil, err
+	}
+	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_REBALANCE|C.RD_KAFKA_EVENT_OFFSET_COMMIT|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR)
+
+	c.handle.rk = C.rd_kafka_new(C.RD_KAFKA_CONSUMER, cConf, cErrstr, 256)
+	if c.handle.rk == nil {
+		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+	}
+
+	C.rd_kafka_poll_set_consumer(c.handle.rk)
+
+	c.handle.c = c
+	c.handle.setup()
+	c.handle.rkq = C.rd_kafka_queue_get_consumer(c.handle.rk)
+	if c.handle.rkq == nil {
+		// no cgrp (no group.id configured), revert to main queue.
+		c.handle.rkq = C.rd_kafka_queue_get_main(c.handle.rk)
+	}
+
+	if c.eventsChanEnable {
+		c.events = make(chan Event, eventsChanSize)
+		c.readerTermChan = make(chan bool)
+
+		/* Start rdkafka consumer queue reader -> events writer goroutine */
+		go consumerReader(c, c.readerTermChan)
+	}
+
+	return c, nil
+}
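Putting the special properties together, a hedged construction sketch (the broker address and group id are placeholders):

```go
c, err := kafka.NewConsumer(&kafka.ConfigMap{
	"bootstrap.servers":        "localhost:9092",
	"group.id":                 "example-group", // required, see above
	"go.events.channel.enable": false,           // use Poll() rather than Events()
})
if err != nil {
	panic(err)
}
defer c.Close()
```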
+
+// rebalance calls the application's rebalance callback, if any.
+// Returns true if the underlying assignment was updated, else false.
+func (c *Consumer) rebalance(ev Event) bool {
+	c.appReassigned = false
+
+	if c.rebalanceCb != nil {
+		c.rebalanceCb(c, ev)
+	}
+
+	return c.appReassigned
+}
+
+// consumerReader reads messages and events from the librdkafka consumer queue
+// and posts them on the consumer channel.
+// Runs until termChan closes
+func consumerReader(c *Consumer, termChan chan bool) {
+
+out:
+	for {
+		select {
+		case <-termChan:
+			break out
+		default:
+			_, term := c.handle.eventPoll(c.events, 100, 1000, termChan)
+			if term {
+				break out
+			}
+
+		}
+	}
+
+	c.handle.terminatedChan <- "consumerReader"
+	return
+
+}
+
+// GetMetadata queries broker for cluster and topic metadata.
+// If topic is non-nil only information about that topic is returned, else if
+// allTopics is false only information about locally used topics is returned,
+// else information about all topics is returned.
+// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
+func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
+	return getMetadata(c, topic, allTopics, timeoutMs)
+}
+
+// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
+// and partition.
+func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) {
+	return queryWatermarkOffsets(c, topic, partition, timeoutMs)
+}
+
+// OffsetsForTimes looks up offsets by timestamp for the given partitions.
+//
+// The returned offset for each partition is the earliest offset whose
+// timestamp is greater than or equal to the given timestamp in the
+// corresponding partition.
+//
+// The timestamps to query are represented as `.Offset` in the `times`
+// argument and the looked up offsets are represented as `.Offset` in the returned
+// `offsets` list.
+//
+// The function will block for at most timeoutMs milliseconds.
+//
+// Duplicate Topic+Partitions are not supported.
+// Per-partition errors may be returned in the `.Error` field.
+func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
+	return offsetsForTimes(c, times, timeoutMs)
+}
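For instance, to find the first offsets at or after a point in time one hour ago (the topic and partition are assumptions); the query timestamp travels in the `.Offset` field as milliseconds:

```go
topic := "example-topic"
tsMs := time.Now().Add(-time.Hour).UnixNano() / int64(time.Millisecond)
times := []kafka.TopicPartition{
	{Topic: &topic, Partition: 0, Offset: kafka.Offset(tsMs)},
}
offsets, err := c.OffsetsForTimes(times, 5000 /* timeoutMs */)
if err == nil {
	fmt.Println(offsets[0].Offset) // earliest offset with timestamp >= tsMs
}
```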
+
+// Subscription returns the current subscription as set by Subscribe()
+func (c *Consumer) Subscription() (topics []string, err error) {
+	var cTopics *C.rd_kafka_topic_partition_list_t
+
+	cErr := C.rd_kafka_subscription(c.handle.rk, &cTopics)
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newError(cErr)
+	}
+	defer C.rd_kafka_topic_partition_list_destroy(cTopics)
+
+	topicCnt := int(cTopics.cnt)
+	topics = make([]string, topicCnt)
+	for i := 0; i < topicCnt; i++ {
+		crktpar := C._c_rdkafka_topic_partition_list_entry(cTopics,
+			C.int(i))
+		topics[i] = C.GoString(crktpar.topic)
+	}
+
+	return topics, nil
+}
+
+// Assignment returns the current partition assignments
+func (c *Consumer) Assignment() (partitions []TopicPartition, err error) {
+	var cParts *C.rd_kafka_topic_partition_list_t
+
+	cErr := C.rd_kafka_assignment(c.handle.rk, &cParts)
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newError(cErr)
+	}
+	defer C.rd_kafka_topic_partition_list_destroy(cParts)
+
+	partitions = newTopicPartitionsFromCparts(cParts)
+
+	return partitions, nil
+}
+
+// Committed retrieves committed offsets for the given set of partitions
+func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
+	cparts := newCPartsFromTopicPartitions(partitions)
+	defer C.rd_kafka_topic_partition_list_destroy(cparts)
+	cerr := C.rd_kafka_committed(c.handle.rk, cparts, C.int(timeoutMs))
+	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newError(cerr)
+	}
+
+	return newTopicPartitionsFromCparts(cparts), nil
+}
+
+// Pause consumption for the provided list of partitions
+//
+// Note that messages already enqueued on the consumer's Event channel
+// (if `go.events.channel.enable` has been set) will NOT be purged by
+// this call, set `go.events.channel.size` accordingly.
+func (c *Consumer) Pause(partitions []TopicPartition) (err error) {
+	cparts := newCPartsFromTopicPartitions(partitions)
+	defer C.rd_kafka_topic_partition_list_destroy(cparts)
+	cerr := C.rd_kafka_pause_partitions(c.handle.rk, cparts)
+	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(cerr)
+	}
+	return nil
+}
+
+// Resume consumption for the provided list of partitions
+func (c *Consumer) Resume(partitions []TopicPartition) (err error) {
+	cparts := newCPartsFromTopicPartitions(partitions)
+	defer C.rd_kafka_topic_partition_list_destroy(cparts)
+	cerr := C.rd_kafka_resume_partitions(c.handle.rk, cparts)
+	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return newError(cerr)
+	}
+	return nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go
new file mode 100644
index 0000000..70a435f
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go
@@ -0,0 +1,77 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Automatically generate error codes from librdkafka
+// See README for instructions
+//go:generate $GOPATH/bin/go_rdkafka_generr generated_errors.go
+
+/*
+#include <librdkafka/rdkafka.h>
+*/
+import "C"
+
+// Error provides a Kafka-specific error container
+type Error struct {
+	code ErrorCode
+	str  string
+}
+
+func newError(code C.rd_kafka_resp_err_t) (err Error) {
+	return Error{ErrorCode(code), ""}
+}
+
+func newGoError(code ErrorCode) (err Error) {
+	return Error{code, ""}
+}
+
+func newErrorFromString(code ErrorCode, str string) (err Error) {
+	return Error{code, str}
+}
+
+func newErrorFromCString(code C.rd_kafka_resp_err_t, cstr *C.char) (err Error) {
+	var str string
+	if cstr != nil {
+		str = C.GoString(cstr)
+	} else {
+		str = ""
+	}
+	return Error{ErrorCode(code), str}
+}
+
+func newCErrorFromString(code C.rd_kafka_resp_err_t, str string) (err Error) {
+	return newErrorFromString(ErrorCode(code), str)
+}
+
+// Error returns a human readable representation of an Error
+// Same as Error.String()
+func (e Error) Error() string {
+	return e.String()
+}
+
+// String returns a human readable representation of an Error
+func (e Error) String() string {
+	if len(e.str) > 0 {
+		return e.str
+	}
+	return e.code.String()
+}
+
+// Code returns the ErrorCode of an Error
+func (e Error) Code() ErrorCode {
+	return e.code
+}
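Since the package's APIs return plain `error` values, callers typically assert to `kafka.Error` to branch on the code; a sketch (topic name is a placeholder):

```go
if err := c.Subscribe("example-topic", nil); err != nil {
	if kerr, ok := err.(kafka.Error); ok {
		switch kerr.Code() {
		case kafka.ErrInvalidArg:
			// bad argument or configuration
		case kafka.ErrTimedOut:
			// operation timed out
		default:
			fmt.Println(kerr) // falls back to the code's string when str is empty
		}
	}
}
```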
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go
new file mode 100644
index 0000000..8a89edb
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go
@@ -0,0 +1,330 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+	"os"
+	"unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+#include "glue_rdkafka.h"
+
+
+#ifdef RD_KAFKA_V_HEADERS
+void chdrs_to_tmphdrs (rd_kafka_headers_t *chdrs, tmphdr_t *tmphdrs) {
+   size_t i = 0;
+   const char *name;
+   const void *val;
+   size_t size;
+
+   while (!rd_kafka_header_get_all(chdrs, i,
+                                   &tmphdrs[i].key,
+                                   &tmphdrs[i].val,
+                                   (size_t *)&tmphdrs[i].size))
+     i++;
+}
+#endif
+
+rd_kafka_event_t *_rk_queue_poll (rd_kafka_queue_t *rkq, int timeoutMs,
+                                  rd_kafka_event_type_t *evtype,
+                                  fetched_c_msg_t *fcMsg,
+                                  rd_kafka_event_t *prev_rkev) {
+    rd_kafka_event_t *rkev;
+
+    if (prev_rkev)
+      rd_kafka_event_destroy(prev_rkev);
+
+    rkev = rd_kafka_queue_poll(rkq, timeoutMs);
+    *evtype = rd_kafka_event_type(rkev);
+
+    if (*evtype == RD_KAFKA_EVENT_FETCH) {
+#ifdef RD_KAFKA_V_HEADERS
+        rd_kafka_headers_t *hdrs;
+#endif
+
+        fcMsg->msg = (rd_kafka_message_t *)rd_kafka_event_message_next(rkev);
+        fcMsg->ts = rd_kafka_message_timestamp(fcMsg->msg, &fcMsg->tstype);
+
+#ifdef RD_KAFKA_V_HEADERS
+        if (!rd_kafka_message_headers(fcMsg->msg, &hdrs)) {
+           fcMsg->tmphdrsCnt = rd_kafka_header_cnt(hdrs);
+           fcMsg->tmphdrs = malloc(sizeof(*fcMsg->tmphdrs) * fcMsg->tmphdrsCnt);
+           chdrs_to_tmphdrs(hdrs, fcMsg->tmphdrs);
+        } else {
+#else
+        if (1) {
+#endif
+           fcMsg->tmphdrs = NULL;
+           fcMsg->tmphdrsCnt = 0;
+        }
+    }
+    return rkev;
+}
+*/
+import "C"
+
+// Event generic interface
+type Event interface {
+	// String returns a human-readable representation of the event
+	String() string
+}
+
+// Specific event types
+
+// Stats statistics event
+type Stats struct {
+	statsJSON string
+}
+
+func (e Stats) String() string {
+	return e.statsJSON
+}
+
+// AssignedPartitions consumer group rebalance event: assigned partition set
+type AssignedPartitions struct {
+	Partitions []TopicPartition
+}
+
+func (e AssignedPartitions) String() string {
+	return fmt.Sprintf("AssignedPartitions: %v", e.Partitions)
+}
+
+// RevokedPartitions consumer group rebalance event: revoked partition set
+type RevokedPartitions struct {
+	Partitions []TopicPartition
+}
+
+func (e RevokedPartitions) String() string {
+	return fmt.Sprintf("RevokedPartitions: %v", e.Partitions)
+}
+
+// PartitionEOF consumer reached end of partition
+type PartitionEOF TopicPartition
+
+func (p PartitionEOF) String() string {
+	return fmt.Sprintf("EOF at %s", TopicPartition(p))
+}
+
+// OffsetsCommitted reports committed offsets
+type OffsetsCommitted struct {
+	Error   error
+	Offsets []TopicPartition
+}
+
+func (o OffsetsCommitted) String() string {
+	return fmt.Sprintf("OffsetsCommitted (%v, %v)", o.Error, o.Offsets)
+}
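The concrete event types above are usually consumed with a type switch on the result of `Poll()`; a sketch, assuming `c` is a *kafka.Consumer:

```go
switch e := c.Poll(100).(type) {
case *kafka.Message:
	fmt.Printf("message on %v\n", e.TopicPartition)
case kafka.PartitionEOF:
	// reached the end of a partition
case kafka.OffsetsCommitted:
	// periodic commit report: e.Error, e.Offsets
case *kafka.Stats:
	// statistics JSON available via e.String()
case kafka.Error:
	// client or broker error
case nil:
	// Poll timed out, nothing to do
}
```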
+
+// eventPoll polls an event from the handle's C rd_kafka_queue_t,
+// translates it into an Event type and then sends it on `channel` if non-nil, else returns the Event.
+// termChan is an optional channel to monitor along with producing to `channel`
+// to indicate that `channel` is being terminated.
+// Returns an (event Event, terminate bool) tuple, where terminate indicates
+// whether termChan received a termination event.
+func (h *handle) eventPoll(channel chan Event, timeoutMs int, maxEvents int, termChan chan bool) (Event, bool) {
+
+	var prevRkev *C.rd_kafka_event_t
+	term := false
+
+	var retval Event
+
+	if channel == nil {
+		maxEvents = 1
+	}
+out:
+	for evcnt := 0; evcnt < maxEvents; evcnt++ {
+		var evtype C.rd_kafka_event_type_t
+		var fcMsg C.fetched_c_msg_t
+		rkev := C._rk_queue_poll(h.rkq, C.int(timeoutMs), &evtype, &fcMsg, prevRkev)
+		prevRkev = rkev
+		timeoutMs = 0
+
+		retval = nil
+
+		switch evtype {
+		case C.RD_KAFKA_EVENT_FETCH:
+			// Consumer fetch event, new message.
+			// Extracted into temporary fcMsg for optimization
+			retval = h.newMessageFromFcMsg(&fcMsg)
+
+		case C.RD_KAFKA_EVENT_REBALANCE:
+			// Consumer rebalance event
+			// If the app provided a RebalanceCb to Subscribe*() or
+			// has go.application.rebalance.enable=true we create an event
+			// and forward it to the application thru the RebalanceCb or the
+			// Events channel respectively.
+			// Since librdkafka requires the rebalance event to be "acked" by
+			// the application to synchronize state we keep track of if the
+			// application performed Assign() or Unassign(), but this only works for
+			// the non-channel case. For the channel case we assume the application
+			// calls Assign() / Unassign().
+			// Failure to do so will "hang" the consumer, e.g., it won't start consuming
+			// and it won't close cleanly, so this error case should be visible
+			// immediately to the application developer.
+			appReassigned := false
+			if C.rd_kafka_event_error(rkev) == C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS {
+				if h.currAppRebalanceEnable {
+					// Application must perform Assign() call
+					var ev AssignedPartitions
+					ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev))
+					if channel != nil || h.c.rebalanceCb == nil {
+						retval = ev
+						appReassigned = true
+					} else {
+						appReassigned = h.c.rebalance(ev)
+					}
+				}
+
+				if !appReassigned {
+					C.rd_kafka_assign(h.rk, C.rd_kafka_event_topic_partition_list(rkev))
+				}
+			} else {
+				if h.currAppRebalanceEnable {
+					// Application must perform Unassign() call
+					var ev RevokedPartitions
+					ev.Partitions = newTopicPartitionsFromCparts(C.rd_kafka_event_topic_partition_list(rkev))
+					if channel != nil || h.c.rebalanceCb == nil {
+						retval = ev
+						appReassigned = true
+					} else {
+						appReassigned = h.c.rebalance(ev)
+					}
+				}
+
+				if !appReassigned {
+					C.rd_kafka_assign(h.rk, nil)
+				}
+			}
+
+		case C.RD_KAFKA_EVENT_ERROR:
+			// Error event
+			cErr := C.rd_kafka_event_error(rkev)
+			switch cErr {
+			case C.RD_KAFKA_RESP_ERR__PARTITION_EOF:
+				crktpar := C.rd_kafka_event_topic_partition(rkev)
+				if crktpar == nil {
+					break
+				}
+
+				defer C.rd_kafka_topic_partition_destroy(crktpar)
+				var peof PartitionEOF
+				setupTopicPartitionFromCrktpar((*TopicPartition)(&peof), crktpar)
+
+				retval = peof
+			default:
+				retval = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
+			}
+
+		case C.RD_KAFKA_EVENT_STATS:
+			retval = &Stats{C.GoString(C.rd_kafka_event_stats(rkev))}
+
+		case C.RD_KAFKA_EVENT_DR:
+			// Producer Delivery Report event
+			// Each such event contains delivery reports for all
+			// messages in the produced batch.
+			// Forward delivery reports to per-message's response channel
+			// or to the global Producer.Events channel, or none.
+			rkmessages := make([]*C.rd_kafka_message_t, int(C.rd_kafka_event_message_count(rkev)))
+
+			cnt := int(C.rd_kafka_event_message_array(rkev, (**C.rd_kafka_message_t)(unsafe.Pointer(&rkmessages[0])), C.size_t(len(rkmessages))))
+
+			for _, rkmessage := range rkmessages[:cnt] {
+				msg := h.newMessageFromC(rkmessage)
+				var ch *chan Event
+
+				if rkmessage._private != nil {
+					// Find cgoif by id
+					cg, found := h.cgoGet((int)((uintptr)(rkmessage._private)))
+					if found {
+						cdr := cg.(cgoDr)
+
+						if cdr.deliveryChan != nil {
+							ch = &cdr.deliveryChan
+						}
+						msg.Opaque = cdr.opaque
+					}
+				}
+
+				if ch == nil && h.fwdDr {
+					ch = &channel
+				}
+
+				if ch != nil {
+					select {
+					case *ch <- msg:
+					case <-termChan:
+						break out
+					}
+
+				} else {
+					retval = msg
+					break out
+				}
+			}
+
+		case C.RD_KAFKA_EVENT_OFFSET_COMMIT:
+			// Offsets committed
+			cErr := C.rd_kafka_event_error(rkev)
+			coffsets := C.rd_kafka_event_topic_partition_list(rkev)
+			var offsets []TopicPartition
+			if coffsets != nil {
+				offsets = newTopicPartitionsFromCparts(coffsets)
+			}
+
+			if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+				retval = OffsetsCommitted{newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)), offsets}
+			} else {
+				retval = OffsetsCommitted{nil, offsets}
+			}
+
+		case C.RD_KAFKA_EVENT_NONE:
+			// poll timed out: no events available
+			break out
+
+		default:
+			if rkev != nil {
+				fmt.Fprintf(os.Stderr, "Ignored event %s\n",
+					C.GoString(C.rd_kafka_event_name(rkev)))
+			}
+
+		}
+
+		if retval != nil {
+			if channel != nil {
+				select {
+				case channel <- retval:
+				case <-termChan:
+					retval = nil
+					term = true
+					break out
+				}
+			} else {
+				break out
+			}
+		}
+	}
+
+	if prevRkev != nil {
+		C.rd_kafka_event_destroy(prevRkev)
+	}
+
+	return retval, term
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go
new file mode 100644
index 0000000..b9f68d8
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go
@@ -0,0 +1,225 @@
+package kafka
+// Copyright 2016 Confluent Inc.
+// AUTOMATICALLY GENERATED BY /home/maglun/gocode/bin/go_rdkafka_generr ON 2018-10-11 09:26:58.938371378 +0200 CEST m=+0.001256618 USING librdkafka 0.11.5
+
+/*
+#include <librdkafka/rdkafka.h>
+*/
+import "C"
+
+// ErrorCode is the integer representation of local and broker error codes
+type ErrorCode int
+
+// String returns a human readable representation of an error code
+func (c ErrorCode) String() string {
+      return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
+}
+
+const (
+    // ErrBadMsg Local: Bad message format
+    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
+    // ErrBadCompression Local: Invalid compressed data
+    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
+    // ErrDestroy Local: Broker handle destroyed
+    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
+    // ErrFail Local: Communication failure with broker
+    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
+    // ErrTransport Local: Broker transport failure
+    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
+    // ErrCritSysResource Local: Critical system resource failure
+    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
+    // ErrResolve Local: Host resolution failure
+    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
+    // ErrMsgTimedOut Local: Message timed out
+    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
+    // ErrPartitionEOF Broker: No more messages
+    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
+    // ErrUnknownPartition Local: Unknown partition
+    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+    // ErrFs Local: File or filesystem error
+    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
+    // ErrUnknownTopic Local: Unknown topic
+    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+    // ErrAllBrokersDown Local: All broker connections are down
+    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
+    // ErrInvalidArg Local: Invalid argument or configuration
+    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
+    // ErrTimedOut Local: Timed out
+    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
+    // ErrQueueFull Local: Queue full
+    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
+    // ErrIsrInsuff Local: ISR count insufficient
+    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
+    // ErrNodeUpdate Local: Broker node update
+    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
+    // ErrSsl Local: SSL error
+    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
+    // ErrWaitCoord Local: Waiting for coordinator
+    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
+    // ErrUnknownGroup Local: Unknown group
+    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
+    // ErrInProgress Local: Operation in progress
+    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
+    // ErrPrevInProgress Local: Previous operation in progress
+    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
+    // ErrExistingSubscription Local: Existing subscription
+    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
+    // ErrAssignPartitions Local: Assign partitions
+    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+    // ErrRevokePartitions Local: Revoke partitions
+    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
+    // ErrConflict Local: Conflicting use
+    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
+    // ErrState Local: Erroneous state
+    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
+    // ErrUnknownProtocol Local: Unknown protocol
+    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
+    // ErrNotImplemented Local: Not implemented
+    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
+    // ErrAuthentication Local: Authentication failure
+    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
+    // ErrNoOffset Local: No offset stored
+    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
+    // ErrOutdated Local: Outdated
+    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
+    // ErrTimedOutQueue Local: Timed out in queue
+    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
+    // ErrUnsupportedFeature Local: Required feature not supported by broker
+    ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
+    // ErrWaitCache Local: Awaiting cache update
+    ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
+    // ErrIntr Local: Operation interrupted
+    ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
+    // ErrKeySerialization Local: Key serialization error
+    ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
+    // ErrValueSerialization Local: Value serialization error
+    ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
+    // ErrKeyDeserialization Local: Key deserialization error
+    ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
+    // ErrValueDeserialization Local: Value deserialization error
+    ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
+    // ErrPartial Local: Partial response
+    ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
+    // ErrReadOnly Local: Read-only object
+    ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
+    // ErrNoent Local: No such entry
+    ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
+    // ErrUnderflow Local: Read underflow
+    ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
+    // ErrInvalidType Local: Invalid type
+    ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
+    // ErrUnknown Unknown broker error
+    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
+    // ErrNoError Success
+    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
+    // ErrOffsetOutOfRange Broker: Offset out of range
+    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
+    // ErrInvalidMsg Broker: Invalid message
+    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
+    // ErrUnknownTopicOrPart Broker: Unknown topic or partition
+    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+    // ErrInvalidMsgSize Broker: Invalid message size
+    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
+    // ErrLeaderNotAvailable Broker: Leader not available
+    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+    // ErrNotLeaderForPartition Broker: Not leader for partition
+    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
+    // ErrRequestTimedOut Broker: Request timed out
+    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
+    // ErrBrokerNotAvailable Broker: Broker not available
+    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
+    // ErrReplicaNotAvailable Broker: Replica not available
+    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
+    // ErrMsgSizeTooLarge Broker: Message size too large
+    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
+    // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
+    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
+    // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
+    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
+    // ErrNetworkException Broker: Broker disconnected before response received
+    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
+    // ErrGroupLoadInProgress Broker: Group coordinator load in progress
+    ErrGroupLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS)
+    // ErrGroupCoordinatorNotAvailable Broker: Group coordinator not available
+    ErrGroupCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE)
+    // ErrNotCoordinatorForGroup Broker: Not coordinator for group
+    ErrNotCoordinatorForGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP)
+    // ErrTopicException Broker: Invalid topic
+    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
+    // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
+    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
+    // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
+    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
+    // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
+    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
+    // ErrInvalidRequiredAcks Broker: Invalid required acks value
+    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
+    // ErrIllegalGeneration Broker: Specified group generation id is not valid
+    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
+    // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
+    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
+    // ErrInvalidGroupID Broker: Invalid group.id
+    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
+    // ErrUnknownMemberID Broker: Unknown member
+    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
+    // ErrInvalidSessionTimeout Broker: Invalid session timeout
+    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
+    // ErrRebalanceInProgress Broker: Group rebalance in progress
+    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
+    // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
+    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
+    // ErrTopicAuthorizationFailed Broker: Topic authorization failed
+    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
+    // ErrGroupAuthorizationFailed Broker: Group authorization failed
+    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
+    // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
+    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
+    // ErrInvalidTimestamp Broker: Invalid timestamp
+    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
+    // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
+    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
+    // ErrIllegalSaslState Broker: Request not valid in current SASL state
+    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
+    // ErrUnsupportedVersion Broker: API version not supported
+    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
+    // ErrTopicAlreadyExists Broker: Topic already exists
+    ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
+    // ErrInvalidPartitions Broker: Invalid number of partitions
+    ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
+    // ErrInvalidReplicationFactor Broker: Invalid replication factor
+    ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
+    // ErrInvalidReplicaAssignment Broker: Invalid replica assignment
+    ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
+    // ErrInvalidConfig Broker: Configuration is invalid
+    ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
+    // ErrNotController Broker: Not controller for cluster
+    ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
+    // ErrInvalidRequest Broker: Invalid request
+    ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
+    // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
+    ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
+    // ErrPolicyViolation Broker: Isolation policy violation
+    ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
+    // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
+    ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
+    // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
+    ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
+    // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
+    ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
+    // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
+    ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
+    // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
+    ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
+    // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
+    ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
+    // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
+    ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
+    // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
+    ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
+    // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
+    ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
+    // ErrSecurityDisabled Broker: Security features are disabled
+    ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
+    // ErrOperationNotAttempted Broker: Operation not attempted
+    ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
+)
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h
new file mode 100644
index 0000000..adcef9a
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h
@@ -0,0 +1,46 @@
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+
+/**
+ * Glue between Go, Cgo and librdkafka
+ */
+
+
+/**
+ * Temporary C to Go header representation
+ */
+typedef struct tmphdr_s {
+  const char *key;
+  const void *val;   // producer: malloc()ed by Go code if size > 0
+                     // consumer: owned by librdkafka
+  ssize_t     size;
+} tmphdr_t;
+
+
+
+/**
+ * Represents a fetched C message, with all extra fields extracted
+ * to struct fields.
+ */
+typedef struct fetched_c_msg {
+  rd_kafka_message_t *msg;
+  rd_kafka_timestamp_type_t tstype;
+  int64_t ts;
+  tmphdr_t *tmphdrs;
+  size_t    tmphdrsCnt;
+} fetched_c_msg_t;
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go
new file mode 100644
index 0000000..c09e64d
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go
@@ -0,0 +1,207 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+	"sync"
+	"unsafe"
+)
+
+/*
+#include <librdkafka/rdkafka.h>
+#include <stdlib.h>
+*/
+import "C"
+
+// Handle represents a generic client handle containing common parts for
+// both Producer and Consumer.
+type Handle interface {
+	gethandle() *handle
+}
+
+// Common instance handle for both Producer and Consumer
+type handle struct {
+	rk  *C.rd_kafka_t
+	rkq *C.rd_kafka_queue_t
+
+	// Termination of background go-routines
+	terminatedChan chan string // string is go-routine name
+
+	// Topic <-> rkt caches
+	rktCacheLock sync.Mutex
+	// topic name -> rkt cache
+	rktCache map[string]*C.rd_kafka_topic_t
+	// rkt -> topic name cache
+	rktNameCache map[*C.rd_kafka_topic_t]string
+
+	//
+	// cgo map
+	// Maps C callbacks based on cgoid back to their Go objects
+	cgoLock   sync.Mutex
+	cgoidNext uintptr
+	cgomap    map[int]cgoif
+
+	//
+	// producer
+	//
+	p *Producer
+
+	// Forward delivery reports on Producer.Events channel
+	fwdDr bool
+
+	//
+	// consumer
+	//
+	c *Consumer
+
+	// Forward rebalancing ack responsibility to application (current setting)
+	currAppRebalanceEnable bool
+}
+
+func (h *handle) String() string {
+	return C.GoString(C.rd_kafka_name(h.rk))
+}
+
+func (h *handle) setup() {
+	h.rktCache = make(map[string]*C.rd_kafka_topic_t)
+	h.rktNameCache = make(map[*C.rd_kafka_topic_t]string)
+	h.cgomap = make(map[int]cgoif)
+	h.terminatedChan = make(chan string, 10)
+}
+
+func (h *handle) cleanup() {
+	for _, crkt := range h.rktCache {
+		C.rd_kafka_topic_destroy(crkt)
+	}
+
+	if h.rkq != nil {
+		C.rd_kafka_queue_destroy(h.rkq)
+	}
+}
+
+// waitTerminated waits for the termination of background go-routines.
+// termCnt is the number of goroutines expected to signal termination completion
+// on h.terminatedChan
+func (h *handle) waitTerminated(termCnt int) {
+	// Wait for termCnt termination-done events from goroutines
+	for ; termCnt > 0; termCnt-- {
+		<-h.terminatedChan
+	}
+}
+
+// getRkt0 finds or creates and returns a C topic_t object from the local cache.
+func (h *handle) getRkt0(topic string, ctopic *C.char, doLock bool) (crkt *C.rd_kafka_topic_t) {
+	if doLock {
+		h.rktCacheLock.Lock()
+		defer h.rktCacheLock.Unlock()
+	}
+	crkt, ok := h.rktCache[topic]
+	if ok {
+		return crkt
+	}
+
+	if ctopic == nil {
+		ctopic = C.CString(topic)
+		defer C.free(unsafe.Pointer(ctopic))
+	}
+
+	crkt = C.rd_kafka_topic_new(h.rk, ctopic, nil)
+	if crkt == nil {
+		panic(fmt.Sprintf("Unable to create new C topic \"%s\": %s",
+			topic, C.GoString(C.rd_kafka_err2str(C.rd_kafka_last_error()))))
+	}
+
+	h.rktCache[topic] = crkt
+	h.rktNameCache[crkt] = topic
+
+	return crkt
+}
+
+// getRkt finds or creates and returns a C topic_t object from the local cache.
+func (h *handle) getRkt(topic string) (crkt *C.rd_kafka_topic_t) {
+	return h.getRkt0(topic, nil, true)
+}
+
+// getTopicNameFromRkt returns the topic name for a C topic_t object, preferably
+// using the local cache to avoid a cgo call.
+func (h *handle) getTopicNameFromRkt(crkt *C.rd_kafka_topic_t) (topic string) {
+	h.rktCacheLock.Lock()
+	defer h.rktCacheLock.Unlock()
+
+	topic, ok := h.rktNameCache[crkt]
+	if ok {
+		return topic
+	}
+
+	// we need our own copy/refcount of the crkt
+	ctopic := C.rd_kafka_topic_name(crkt)
+	topic = C.GoString(ctopic)
+
+	crkt = h.getRkt0(topic, ctopic, false /* don't lock */)
+
+	return topic
+}
+
+// cgoif is a generic interface for holding Go state passed as opaque
+// value to the C code.
+// Since pointers to complex Go types cannot be passed to C we instead create
+// a cgoif object, generate a unique id that is added to the cgomap,
+// and then pass that id to the C code. When the C code callback is called we
+// use the id to look up the cgoif object in the cgomap.
+type cgoif interface{}
+
+// delivery report cgoif container
+type cgoDr struct {
+	deliveryChan chan Event
+	opaque       interface{}
+}
+
+// cgoPut adds object cg to the handle's cgo map and returns a
+// unique id for the added entry.
+// Thread-safe.
+// FIXME: the uniqueness of the id is questionable over time.
+func (h *handle) cgoPut(cg cgoif) (cgoid int) {
+	h.cgoLock.Lock()
+	defer h.cgoLock.Unlock()
+
+	h.cgoidNext++
+	if h.cgoidNext == 0 {
+		h.cgoidNext++
+	}
+	cgoid = (int)(h.cgoidNext)
+	h.cgomap[cgoid] = cg
+	return cgoid
+}
+
+// cgoGet looks up cgoid in the cgo map, deletes the reference from the map
+// and returns the object, if found. Else returns nil, false.
+// Thread-safe.
+func (h *handle) cgoGet(cgoid int) (cg cgoif, found bool) {
+	if cgoid == 0 {
+		return nil, false
+	}
+
+	h.cgoLock.Lock()
+	defer h.cgoLock.Unlock()
+	cg, found = h.cgomap[cgoid]
+	if found {
+		delete(h.cgomap, cgoid)
+	}
+
+	return cg, found
+}
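+
+// A minimal sketch of the intended put/get round trip, for illustration only
+// (myChan and myOpaque are stand-ins for application-supplied values):
+//
+//   cgoid := h.cgoPut(cgoDr{deliveryChan: myChan, opaque: myOpaque})
+//   // ... cgoid travels through C as an opaque value ...
+//   if cg, found := h.cgoGet(cgoid); found {
+//       dr := cg.(cgoDr) // type-assert back to the concrete container
+//       _ = dr.deliveryChan
+//   }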
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go
new file mode 100644
index 0000000..67d6202
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go
@@ -0,0 +1,67 @@
+package kafka
+
+/**
+ * Copyright 2018 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+	"strconv"
+)
+
+/*
+#include <string.h>
+#include <librdkafka/rdkafka.h>
+#include "glue_rdkafka.h"
+*/
+import "C"
+
+// Header represents a single Kafka message header.
+//
+// Message headers are made up of a list of Header elements, retaining their original insert
+// order and allowing for duplicate Keys.
+//
+// Key is a human-readable string identifying the header.
+// Value is the key's binary value; Kafka does not put any restrictions on the format
+// of the Value, but it should be kept relatively compact.
+// The value may be a byte array, empty, or nil.
+//
+// NOTE: Message headers are not available on producer delivery report messages.
+type Header struct {
+	Key   string // Header name (utf-8 string)
+	Value []byte // Header value (nil, empty, or binary)
+}
+
+// String returns the Header Key and data in a human-readable, possibly truncated form
+// suitable for displaying to the user.
+func (h Header) String() string {
+	if h.Value == nil {
+		return fmt.Sprintf("%s=nil", h.Key)
+	}
+
+	valueLen := len(h.Value)
+	if valueLen == 0 {
+		return fmt.Sprintf("%s=<empty>", h.Key)
+	}
+
+	truncSize := valueLen
+	trunc := ""
+	if valueLen > 50+15 {
+		truncSize = 50
+		trunc = fmt.Sprintf("(%d more bytes)", valueLen-truncSize)
+	}
+
+	return fmt.Sprintf("%s=%s%s", h.Key, strconv.Quote(string(h.Value[:truncSize])), trunc)
+}
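+
+// A small illustration of how a header renders (the values are examples,
+// not part of the API):
+//
+//   h := Header{Key: "myTestHeader", Value: []byte("header values are binary")}
+//   fmt.Println(h) // myTestHeader="header values are binary"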
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go
new file mode 100644
index 0000000..4883ee2
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go
@@ -0,0 +1,242 @@
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package kafka provides high-level Apache Kafka producer and consumer APIs
+// using bindings on top of the librdkafka C library.
+//
+//
+// High-level Consumer
+//
+// * Decide if you want to read messages and events from the `.Events()` channel
+// (set `"go.events.channel.enable": true`) or by calling `.Poll()`.
+//
+// * Create a Consumer with `kafka.NewConsumer()` providing at
+// least the `bootstrap.servers` and `group.id` configuration properties.
+//
+// * Call `.Subscribe()` (or `.SubscribeTopics()` to subscribe to multiple topics)
+// to join the group with the specified subscription set.
+// Subscriptions are atomic, calling `.Subscribe*()` again will leave
+// the group and rejoin with the new set of topics.
+//
+// * Start reading events and messages from either the `.Events` channel
+// or by calling `.Poll()`.
+//
+// * When the group has rebalanced each client member is assigned a
+// (sub-)set of topic+partitions.
+// By default the consumer will start fetching messages for its assigned
+// partitions at this point, but your application may enable rebalance
+// events to gain insight into which partitions were assigned,
+// as well as to set the initial offsets. To do this you need to pass
+// `"go.application.rebalance.enable": true` to the `NewConsumer()` call
+// mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
+// with the assigned partition set. You can optionally modify the initial
+// offsets (they'll default to stored offsets and if there are no previously stored
+// offsets it will fall back to `"default.topic.config": ConfigMap{"auto.offset.reset": ..}`
+// which defaults to the `latest` message) and then call `.Assign(partitions)`
+// to start consuming. If you don't need to modify the initial offsets you will
+// not need to call `.Assign()`; the client will do so automatically for you if
+// you don't.
+//
+// * As messages are fetched they will be made available on either the
+// `.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`.
+//
+// * Handle messages, events and errors to your liking.
+//
+// * When you are done consuming call `.Close()` to commit final offsets
+// and leave the consumer group.
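+//
+// A condensed sketch of the Poll()-based flow described above (broker,
+// group and topic names are placeholders; error handling is elided):
+//
+//   c, _ := kafka.NewConsumer(&kafka.ConfigMap{
+//       "bootstrap.servers": "mybroker",
+//       "group.id":          "mygroup",
+//   })
+//   c.SubscribeTopics([]string{"mytopic"}, nil)
+//   for {
+//       switch e := c.Poll(100).(type) {
+//       case *kafka.Message:
+//           fmt.Printf("%s: %s\n", e.TopicPartition, string(e.Value))
+//       case kafka.Error:
+//           fmt.Println(e)
+//       }
+//   }
+//   // call c.Close() when done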
+//
+//
+//
+// Producer
+//
+// * Create a Producer with `kafka.NewProducer()` providing at least
+// the `bootstrap.servers` configuration property.
+//
+// * Messages may now be produced either by sending a `*kafka.Message`
+// on the `.ProduceChannel` or by calling `.Produce()`.
+//
+// * Producing is an asynchronous operation so the client notifies the application
+// of per-message produce success or failure through something called delivery reports.
+// Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
+// and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
+// was successfully delivered or not.
+// It is also possible to direct delivery reports to alternate channels
+// by providing a non-nil `chan Event` channel to `.Produce()`.
+// If no delivery reports are wanted they can be completely disabled by
+// setting configuration property `"go.delivery.reports": false`.
+//
+// * When you are done producing messages you will need to make sure all messages
+// are indeed delivered to the broker (or failed); remember that this is
+// an asynchronous client so some of your messages may be lingering in internal
+// channels or transmission queues.
+// To do this you can either keep track of the messages you've produced
+// and wait for their corresponding delivery reports, or call the convenience
+// function `.Flush()` that will block until all message deliveries are done
+// or the provided timeout elapses.
+//
+// * Finally call `.Close()` to decommission the producer.
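+//
+// A minimal produce-and-wait sketch of the steps above (broker and topic
+// names are placeholders; error handling is elided):
+//
+//   p, _ := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "mybroker"})
+//   topic := "mytopic"
+//   p.Produce(&kafka.Message{
+//       TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+//       Value:          []byte("hello"),
+//   }, nil)
+//   m := (<-p.Events()).(*kafka.Message) // delivery report
+//   if m.TopicPartition.Error != nil {
+//       // delivery failed
+//   }
+//   p.Flush(15 * 1000)
+//   p.Close()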
+//
+//
+// Events
+//
+// Apart from emitting messages and delivery reports the client also communicates
+// with the application through a number of different event types.
+// An application may choose to handle or ignore these events.
+//
+// Consumer events
+//
+// * `*kafka.Message` - a fetched message.
+//
+// * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
+// Requires `go.application.rebalance.enable`
+//
+// * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance.
+// `AssignedPartitions` and `RevokedPartitions` are symmetrical.
+// Requires `go.application.rebalance.enable`
+//
+// * `PartitionEOF` - Consumer has reached the end of a partition.
+// NOTE: The consumer will keep trying to fetch new messages for the partition.
+//
+// * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
+//
+//
+// Producer events
+//
+// * `*kafka.Message` - delivery report for produced message.
+// Check `.TopicPartition.Error` for delivery result.
+//
+//
+// Generic events for both Consumer and Producer
+//
+// * `KafkaError` - client (error codes are prefixed with _) or broker error.
+// These errors are normally just informational since the
+// client will try its best to automatically recover (eventually).
+//
+//
+// Hint: If your application registers a signal notification
+// (signal.Notify), make sure the signals channel is buffered to avoid
+// possible complications with blocking Poll() calls.
+//
+// Note: The Confluent Kafka Go client is safe for concurrent use.
+package kafka
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include <string.h>
+#include <librdkafka/rdkafka.h>
+
+static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) {
+   return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL;
+}
+*/
+import "C"
+
+// PartitionAny represents any partition (for partitioning),
+// or unspecified value (for all other cases)
+const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
+
+// TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.
+type TopicPartition struct {
+	Topic     *string
+	Partition int32
+	Offset    Offset
+	Error     error
+}
+
+func (p TopicPartition) String() string {
+	topic := "<null>"
+	if p.Topic != nil {
+		topic = *p.Topic
+	}
+	if p.Error != nil {
+		return fmt.Sprintf("%s[%d]@%s(%s)",
+			topic, p.Partition, p.Offset, p.Error)
+	}
+	return fmt.Sprintf("%s[%d]@%s",
+		topic, p.Partition, p.Offset)
+}
+
+// TopicPartitions is a slice of TopicPartition elements that also implements
+// the sort interface
+type TopicPartitions []TopicPartition
+
+func (tps TopicPartitions) Len() int {
+	return len(tps)
+}
+
+func (tps TopicPartitions) Less(i, j int) bool {
+	if *tps[i].Topic < *tps[j].Topic {
+		return true
+	} else if *tps[i].Topic > *tps[j].Topic {
+		return false
+	}
+	return tps[i].Partition < tps[j].Partition
+}
+
+func (tps TopicPartitions) Swap(i, j int) {
+	tps[i], tps[j] = tps[j], tps[i]
+}
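+
+// Example use of the sort interface (topic names are illustrative):
+//
+//   t1, t2 := "topic-b", "topic-a"
+//   parts := TopicPartitions{{Topic: &t1, Partition: 2}, {Topic: &t2, Partition: 0}}
+//   sort.Sort(parts) // orders by topic name, then partition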
+
+// newCPartsFromTopicPartitions creates a new C rd_kafka_topic_partition_list_t
+// from a TopicPartition array.
+func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) {
+	cparts = C.rd_kafka_topic_partition_list_new(C.int(len(partitions)))
+	for _, part := range partitions {
+		ctopic := C.CString(*part.Topic)
+		defer C.free(unsafe.Pointer(ctopic))
+		rktpar := C.rd_kafka_topic_partition_list_add(cparts, ctopic, C.int32_t(part.Partition))
+		rktpar.offset = C.int64_t(part.Offset)
+	}
+
+	return cparts
+}
+
+func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kafka_topic_partition_t) {
+
+	topic := C.GoString(crktpar.topic)
+	partition.Topic = &topic
+	partition.Partition = int32(crktpar.partition)
+	partition.Offset = Offset(crktpar.offset)
+	if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		partition.Error = newError(crktpar.err)
+	}
+}
+
+func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) {
+
+	partcnt := int(cparts.cnt)
+
+	partitions = make([]TopicPartition, partcnt)
+	for i := 0; i < partcnt; i++ {
+		crktpar := C._c_rdkafka_topic_partition_list_entry(cparts, C.int(i))
+		setupTopicPartitionFromCrktpar(&partitions[i], crktpar)
+	}
+
+	return partitions
+}
+
+// LibraryVersion returns the underlying librdkafka library version as a
+// (version_int, version_str) tuple.
+func LibraryVersion() (int, string) {
+	ver := (int)(C.rd_kafka_version())
+	verstr := C.GoString(C.rd_kafka_version_str())
+	return ver, verstr
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go
new file mode 100644
index 0000000..3472d1c
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go
@@ -0,0 +1,207 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"fmt"
+	"time"
+	"unsafe"
+)
+
+/*
+#include <string.h>
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+#include "glue_rdkafka.h"
+
+void setup_rkmessage (rd_kafka_message_t *rkmessage,
+                      rd_kafka_topic_t *rkt, int32_t partition,
+                      const void *payload, size_t len,
+                      void *key, size_t keyLen, void *opaque) {
+     rkmessage->rkt       = rkt;
+     rkmessage->partition = partition;
+     rkmessage->payload   = (void *)payload;
+     rkmessage->len       = len;
+     rkmessage->key       = (void *)key;
+     rkmessage->key_len   = keyLen;
+     rkmessage->_private  = opaque;
+}
+*/
+import "C"
+
+// TimestampType is the Message timestamp type or source.
+type TimestampType int
+
+const (
+	// TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
+	TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
+	// TimestampCreateTime indicates timestamp set by producer (source time)
+	TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
+	// TimestampLogAppendTime indicates timestamp set by broker (store time)
+	TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+)
+
+func (t TimestampType) String() string {
+	switch t {
+	case TimestampCreateTime:
+		return "CreateTime"
+	case TimestampLogAppendTime:
+		return "LogAppendTime"
+	case TimestampNotAvailable:
+		fallthrough
+	default:
+		return "NotAvailable"
+	}
+}
+
+// Message represents a Kafka message
+type Message struct {
+	TopicPartition TopicPartition
+	Value          []byte
+	Key            []byte
+	Timestamp      time.Time
+	TimestampType  TimestampType
+	Opaque         interface{}
+	Headers        []Header
+}
+
+// String returns a human-readable representation of a Message.
+// Key and payload are not represented.
+func (m *Message) String() string {
+	var topic string
+	if m.TopicPartition.Topic != nil {
+		topic = *m.TopicPartition.Topic
+	}
+	return fmt.Sprintf("%s[%d]@%s", topic, m.TopicPartition.Partition, m.TopicPartition.Offset)
+}
+
+func (h *handle) getRktFromMessage(msg *Message) (crkt *C.rd_kafka_topic_t) {
+	if msg.TopicPartition.Topic == nil {
+		return nil
+	}
+
+	return h.getRkt(*msg.TopicPartition.Topic)
+}
+
+func (h *handle) newMessageFromFcMsg(fcMsg *C.fetched_c_msg_t) (msg *Message) {
+	msg = &Message{}
+
+	if fcMsg.ts != -1 {
+		ts := int64(fcMsg.ts)
+		msg.TimestampType = TimestampType(fcMsg.tstype)
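+		// fcMsg.ts is in milliseconds; split it into seconds and nanoseconds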
+		msg.Timestamp = time.Unix(ts/1000, (ts%1000)*1000000)
+	}
+
+	if fcMsg.tmphdrsCnt > 0 {
+		msg.Headers = make([]Header, fcMsg.tmphdrsCnt)
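+		// The cast below views the C tmphdr array as a large Go array so the
+		// individual entries can be indexed; only tmphdrsCnt entries are valid.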
+		for n := range msg.Headers {
+			tmphdr := (*[1 << 30]C.tmphdr_t)(unsafe.Pointer(fcMsg.tmphdrs))[n]
+			msg.Headers[n].Key = C.GoString(tmphdr.key)
+			if tmphdr.val != nil {
+				msg.Headers[n].Value = C.GoBytes(unsafe.Pointer(tmphdr.val), C.int(tmphdr.size))
+			} else {
+				msg.Headers[n].Value = nil
+			}
+		}
+		C.free(unsafe.Pointer(fcMsg.tmphdrs))
+	}
+
+	h.setupMessageFromC(msg, fcMsg.msg)
+
+	return msg
+}
+
+// setupMessageFromC sets up a message object from a C rd_kafka_message_t
+func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) {
+	if cmsg.rkt != nil {
+		topic := h.getTopicNameFromRkt(cmsg.rkt)
+		msg.TopicPartition.Topic = &topic
+	}
+	msg.TopicPartition.Partition = int32(cmsg.partition)
+	if cmsg.payload != nil {
+		msg.Value = C.GoBytes(unsafe.Pointer(cmsg.payload), C.int(cmsg.len))
+	}
+	if cmsg.key != nil {
+		msg.Key = C.GoBytes(unsafe.Pointer(cmsg.key), C.int(cmsg.key_len))
+	}
+	msg.TopicPartition.Offset = Offset(cmsg.offset)
+	if cmsg.err != 0 {
+		msg.TopicPartition.Error = newError(cmsg.err)
+	}
+}
+
+// newMessageFromC creates a new message object from a C rd_kafka_message_t
+// NOTE: For use with Producer: does not set message timestamp fields.
+func (h *handle) newMessageFromC(cmsg *C.rd_kafka_message_t) (msg *Message) {
+	msg = &Message{}
+
+	h.setupMessageFromC(msg, cmsg)
+
+	return msg
+}
+
+// messageToC sets up cmsg as a clone of msg
+func (h *handle) messageToC(msg *Message, cmsg *C.rd_kafka_message_t) {
+	var valp unsafe.Pointer
+	var keyp unsafe.Pointer
+
+	// to circumvent Cgo constraints we need to allocate C heap memory
+	// for both Value and Key (one allocation back to back)
+	// and copy the bytes from Value and Key to the C memory.
+	// We later tell librdkafka (in produce()) to free the
+	// C memory pointer when it is done.
+	var payload unsafe.Pointer
+
+	valueLen := 0
+	keyLen := 0
+	if msg.Value != nil {
+		valueLen = len(msg.Value)
+	}
+	if msg.Key != nil {
+		keyLen = len(msg.Key)
+	}
+
+	allocLen := valueLen + keyLen
+	if allocLen > 0 {
+		payload = C.malloc(C.size_t(allocLen))
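+		// The big-array casts below view the C allocation as a Go array
+		// so that copy() can write into it.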
+		if valueLen > 0 {
+			copy((*[1 << 30]byte)(payload)[0:valueLen], msg.Value)
+			valp = payload
+		}
+		if keyLen > 0 {
+			copy((*[1 << 30]byte)(payload)[valueLen:allocLen], msg.Key)
+			keyp = unsafe.Pointer(&((*[1 << 31]byte)(payload)[valueLen]))
+		}
+	}
+
+	cmsg.rkt = h.getRktFromMessage(msg)
+	cmsg.partition = C.int32_t(msg.TopicPartition.Partition)
+	cmsg.payload = valp
+	cmsg.len = C.size_t(valueLen)
+	cmsg.key = keyp
+	cmsg.key_len = C.size_t(keyLen)
+	cmsg._private = nil
+}
+
+// used for testing messageToC performance
+func (h *handle) messageToCDummy(msg *Message) {
+	var cmsg C.rd_kafka_message_t
+	h.messageToC(msg, &cmsg)
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go
new file mode 100644
index 0000000..061147d
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go
@@ -0,0 +1,158 @@
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+
+struct rd_kafka_metadata_broker *_getMetadata_broker_element(struct rd_kafka_metadata *m, int i) {
+  return &m->brokers[i];
+}
+
+struct rd_kafka_metadata_topic *_getMetadata_topic_element(struct rd_kafka_metadata *m, int i) {
+  return &m->topics[i];
+}
+
+struct rd_kafka_metadata_partition *_getMetadata_partition_element(struct rd_kafka_metadata *m, int topic_idx, int partition_idx) {
+  return &m->topics[topic_idx].partitions[partition_idx];
+}
+
+int32_t _get_int32_element (int32_t *arr, int i) {
+  return arr[i];
+}
+
+*/
+import "C"
+
+// BrokerMetadata contains per-broker metadata
+type BrokerMetadata struct {
+	ID   int32
+	Host string
+	Port int
+}
+
+// PartitionMetadata contains per-partition metadata
+type PartitionMetadata struct {
+	ID       int32
+	Error    Error
+	Leader   int32
+	Replicas []int32
+	Isrs     []int32
+}
+
+// TopicMetadata contains per-topic metadata
+type TopicMetadata struct {
+	Topic      string
+	Partitions []PartitionMetadata
+	Error      Error
+}
+
+// Metadata contains broker and topic metadata for all (matching) topics
+type Metadata struct {
+	Brokers []BrokerMetadata
+	Topics  map[string]TopicMetadata
+
+	OriginatingBroker BrokerMetadata
+}
+
+// getMetadata queries broker for cluster and topic metadata.
+// If topic is non-nil, only information about that topic is returned; else if
+// allTopics is false, only information about locally used topics is returned;
+// else information about all topics is returned.
+func getMetadata(H Handle, topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
+	h := H.gethandle()
+
+	var rkt *C.rd_kafka_topic_t
+	if topic != nil {
+		rkt = h.getRkt(*topic)
+	}
+
+	var cMd *C.struct_rd_kafka_metadata
+	cErr := C.rd_kafka_metadata(h.rk, bool2cint(allTopics),
+		rkt, &cMd, C.int(timeoutMs))
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newError(cErr)
+	}
+
+	m := Metadata{}
+	defer C.rd_kafka_metadata_destroy(cMd)
+
+	m.Brokers = make([]BrokerMetadata, cMd.broker_cnt)
+	for i := 0; i < int(cMd.broker_cnt); i++ {
+		b := C._getMetadata_broker_element(cMd, C.int(i))
+		m.Brokers[i] = BrokerMetadata{int32(b.id), C.GoString(b.host),
+			int(b.port)}
+	}
+
+	m.Topics = make(map[string]TopicMetadata, int(cMd.topic_cnt))
+	for i := 0; i < int(cMd.topic_cnt); i++ {
+		t := C._getMetadata_topic_element(cMd, C.int(i))
+
+		thisTopic := C.GoString(t.topic)
+		m.Topics[thisTopic] = TopicMetadata{Topic: thisTopic,
+			Error:      newError(t.err),
+			Partitions: make([]PartitionMetadata, int(t.partition_cnt))}
+
+		for j := 0; j < int(t.partition_cnt); j++ {
+			p := C._getMetadata_partition_element(cMd, C.int(i), C.int(j))
+			m.Topics[thisTopic].Partitions[j] = PartitionMetadata{
+				ID:     int32(p.id),
+				Error:  newError(p.err),
+				Leader: int32(p.leader)}
+			m.Topics[thisTopic].Partitions[j].Replicas = make([]int32, int(p.replica_cnt))
+			for ir := 0; ir < int(p.replica_cnt); ir++ {
+				m.Topics[thisTopic].Partitions[j].Replicas[ir] = int32(C._get_int32_element(p.replicas, C.int(ir)))
+			}
+
+			m.Topics[thisTopic].Partitions[j].Isrs = make([]int32, int(p.isr_cnt))
+			for ii := 0; ii < int(p.isr_cnt); ii++ {
+				m.Topics[thisTopic].Partitions[j].Isrs[ii] = int32(C._get_int32_element(p.isrs, C.int(ii)))
+			}
+		}
+	}
+
+	m.OriginatingBroker = BrokerMetadata{int32(cMd.orig_broker_id),
+		C.GoString(cMd.orig_broker_name), 0}
+
+	return &m, nil
+}
+
+// queryWatermarkOffsets returns the broker's low and high offsets for the given topic
+// and partition.
+func queryWatermarkOffsets(H Handle, topic string, partition int32, timeoutMs int) (low, high int64, err error) {
+	h := H.gethandle()
+
+	ctopic := C.CString(topic)
+	defer C.free(unsafe.Pointer(ctopic))
+
+	var cLow, cHigh C.int64_t
+
+	e := C.rd_kafka_query_watermark_offsets(h.rk, ctopic, C.int32_t(partition),
+		&cLow, &cHigh, C.int(timeoutMs))
+	if e != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return 0, 0, newError(e)
+	}
+
+	low = int64(cLow)
+	high = int64(cHigh)
+	return low, high, nil
+}
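+
+// Sketch of a typical call through one of the public wrappers (topic and
+// partition are placeholders; c is an existing Consumer or Producer):
+//
+//   low, high, err := c.QueryWatermarkOffsets("mytopic", 0, 5*1000)
+//   if err == nil {
+//       backlog := high - low // approximate number of messages in the partition
+//       _ = backlog
+//   }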
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go
new file mode 100644
index 0000000..6d602ce
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import "C"
+
+// bool2cint converts a bool to a C.int (1 or 0)
+func bool2cint(b bool) C.int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// cint2bool converts a C.int to a bool
+func cint2bool(v C.int) bool {
+	return v != 0
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go
new file mode 100644
index 0000000..5dd7fd2
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go
@@ -0,0 +1,144 @@
+/**
+ * Copyright 2017 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"fmt"
+	"strconv"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+
+static int64_t _c_rdkafka_offset_tail(int64_t rel) {
+   return RD_KAFKA_OFFSET_TAIL(rel);
+}
+*/
+import "C"
+
+// Offset type (int64) with support for canonical names
+type Offset int64
+
+// OffsetBeginning represents the earliest offset (logical)
+const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
+
+// OffsetEnd represents the latest offset (logical)
+const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
+
+// OffsetInvalid represents an invalid/unspecified offset
+const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
+
+// OffsetStored represents a stored offset
+const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
+
+func (o Offset) String() string {
+	switch o {
+	case OffsetBeginning:
+		return "beginning"
+	case OffsetEnd:
+		return "end"
+	case OffsetInvalid:
+		return "unset"
+	case OffsetStored:
+		return "stored"
+	default:
+		return fmt.Sprintf("%d", int64(o))
+	}
+}
+
+// Set offset value, see NewOffset()
+func (o *Offset) Set(offset interface{}) error {
+	n, err := NewOffset(offset)
+
+	if err == nil {
+		*o = n
+	}
+
+	return err
+}
+
+// NewOffset creates a new Offset using the provided logical string, or an
+// absolute int64 offset value.
+// Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
+func NewOffset(offset interface{}) (Offset, error) {
+
+	switch v := offset.(type) {
+	case string:
+		switch v {
+		case "beginning", "earliest":
+			return Offset(OffsetBeginning), nil
+
+		case "end", "latest":
+			return Offset(OffsetEnd), nil
+
+		case "unset", "invalid":
+			return Offset(OffsetInvalid), nil
+
+		case "stored":
+			return Offset(OffsetStored), nil
+
+		default:
+			off, err := strconv.Atoi(v)
+			return Offset(off), err
+		}
+
+	case int:
+		return Offset((int64)(v)), nil
+	case int64:
+		return Offset(v), nil
+	default:
+		return OffsetInvalid, newErrorFromString(ErrInvalidArg,
+			fmt.Sprintf("Invalid offset type: %T", v))
+	}
+}
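+
+// Illustrative conversions (all of these are valid inputs):
+//
+//   o1, _ := NewOffset("beginning") // OffsetBeginning
+//   o2, _ := NewOffset("12345")     // absolute offset parsed from a string
+//   o3, _ := NewOffset(int64(42))   // absolute offset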
+
+// OffsetTail returns the logical offset relativeOffset from current end of partition
+func OffsetTail(relativeOffset Offset) Offset {
+	return Offset(C._c_rdkafka_offset_tail(C.int64_t(relativeOffset)))
+}
+
+// offsetsForTimes looks up offsets by timestamp for the given partitions.
+//
+// The returned offset for each partition is the earliest offset whose
+// timestamp is greater than or equal to the given timestamp in the
+// corresponding partition.
+//
+// The timestamps to query are represented as `.Offset` in the `times`
+// argument and the looked up offsets are represented as `.Offset` in the returned
+// `offsets` list.
+//
+// The function will block for at most timeoutMs milliseconds.
+//
+// Duplicate Topic+Partitions are not supported.
+// Per-partition errors may be returned in the `.Error` field.
+func offsetsForTimes(H Handle, times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
+	cparts := newCPartsFromTopicPartitions(times)
+	defer C.rd_kafka_topic_partition_list_destroy(cparts)
+	cerr := C.rd_kafka_offsets_for_times(H.gethandle().rk, cparts, C.int(timeoutMs))
+	if cerr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		return nil, newError(cerr)
+	}
+
+	return newTopicPartitionsFromCparts(cparts), nil
+}
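+
+// Sketch of a lookup (h is an existing Handle; tsMillis is a hypothetical
+// query timestamp in milliseconds, carried in the Offset field):
+//
+//   t := "mytopic"
+//   times := []TopicPartition{{Topic: &t, Partition: 0, Offset: Offset(tsMillis)}}
+//   offsets, err := offsetsForTimes(h, times, 5*1000)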
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go
new file mode 100644
index 0000000..7eac912
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go
@@ -0,0 +1,583 @@
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"fmt"
+	"math"
+	"time"
+	"unsafe"
+)
+
+/*
+#include <stdlib.h>
+#include <librdkafka/rdkafka.h>
+#include "glue_rdkafka.h"
+
+
+#ifdef RD_KAFKA_V_HEADERS
+// Convert tmphdrs to chdrs (created by this function).
+// If tmphdr.size == -1: value is considered Null
+//    tmphdr.size == 0:  value is considered empty (ignored)
+//    tmphdr.size > 0:   value is considered non-empty
+//
+// WARNING: The header keys and values will be freed by this function.
+void tmphdrs_to_chdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt,
+                       rd_kafka_headers_t **chdrs) {
+   size_t i;
+
+   *chdrs = rd_kafka_headers_new(tmphdrsCnt);
+
+   for (i = 0 ; i < tmphdrsCnt ; i++) {
+      rd_kafka_header_add(*chdrs,
+                          tmphdrs[i].key, -1,
+                          tmphdrs[i].size == -1 ? NULL :
+                          (tmphdrs[i].size == 0 ? "" : tmphdrs[i].val),
+                          tmphdrs[i].size == -1 ? 0 : tmphdrs[i].size);
+      if (tmphdrs[i].size > 0)
+         free((void *)tmphdrs[i].val);
+      free((void *)tmphdrs[i].key);
+   }
+}
+
+#else
+void free_tmphdrs (tmphdr_t *tmphdrs, size_t tmphdrsCnt) {
+   size_t i;
+   for (i = 0 ; i < tmphdrsCnt ; i++) {
+      if (tmphdrs[i].size > 0)
+         free((void *)tmphdrs[i].val);
+      free((void *)tmphdrs[i].key);
+   }
+}
+#endif
+
+
+rd_kafka_resp_err_t do_produce (rd_kafka_t *rk,
+          rd_kafka_topic_t *rkt, int32_t partition,
+          int msgflags,
+          int valIsNull, void *val, size_t val_len,
+          int keyIsNull, void *key, size_t key_len,
+          int64_t timestamp,
+          tmphdr_t *tmphdrs, size_t tmphdrsCnt,
+          uintptr_t cgoid) {
+  void *valp = valIsNull ? NULL : val;
+  void *keyp = keyIsNull ? NULL : key;
+#ifdef RD_KAFKA_V_TIMESTAMP
+rd_kafka_resp_err_t err;
+#ifdef RD_KAFKA_V_HEADERS
+  rd_kafka_headers_t *hdrs = NULL;
+#endif
+#endif
+
+
+  if (tmphdrsCnt > 0) {
+#ifdef RD_KAFKA_V_HEADERS
+     tmphdrs_to_chdrs(tmphdrs, tmphdrsCnt, &hdrs);
+#else
+     free_tmphdrs(tmphdrs, tmphdrsCnt);
+     return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+#endif
+  }
+
+
+#ifdef RD_KAFKA_V_TIMESTAMP
+  err = rd_kafka_producev(rk,
+        RD_KAFKA_V_RKT(rkt),
+        RD_KAFKA_V_PARTITION(partition),
+        RD_KAFKA_V_MSGFLAGS(msgflags),
+        RD_KAFKA_V_VALUE(valp, val_len),
+        RD_KAFKA_V_KEY(keyp, key_len),
+        RD_KAFKA_V_TIMESTAMP(timestamp),
+#ifdef RD_KAFKA_V_HEADERS
+        RD_KAFKA_V_HEADERS(hdrs),
+#endif
+        RD_KAFKA_V_OPAQUE((void *)cgoid),
+        RD_KAFKA_V_END);
+#ifdef RD_KAFKA_V_HEADERS
+  if (err && hdrs)
+    rd_kafka_headers_destroy(hdrs);
+#endif
+  return err;
+#else
+  if (timestamp)
+      return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+  if (rd_kafka_produce(rkt, partition, msgflags,
+                       valp, val_len,
+                       keyp, key_len,
+                       (void *)cgoid) == -1)
+      return rd_kafka_last_error();
+  else
+      return RD_KAFKA_RESP_ERR_NO_ERROR;
+#endif
+}
+*/
+import "C"
+
+// Producer implements a High-level Apache Kafka Producer instance
+type Producer struct {
+	events         chan Event
+	produceChannel chan *Message
+	handle         handle
+
+	// Terminates the poller() goroutine
+	pollerTermChan chan bool
+}
+
+// String returns a human readable name for a Producer instance
+func (p *Producer) String() string {
+	return p.handle.String()
+}
+
+// gethandle implements the Handle interface
+func (p *Producer) gethandle() *handle {
+	return &p.handle
+}
+
+func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) error {
+	if msg == nil || msg.TopicPartition.Topic == nil || len(*msg.TopicPartition.Topic) == 0 {
+		return newErrorFromString(ErrInvalidArg, "")
+	}
+
+	crkt := p.handle.getRkt(*msg.TopicPartition.Topic)
+
+	// Three problems:
+	//  1) There's a difference between an empty Value or Key (length 0, proper pointer) and
+	//     a null Value or Key (length 0, null pointer).
+	//  2) we need to be able to send a null Value or Key, but the unsafe.Pointer(&slice[0])
+	//     dereference can't be performed on a nil slice.
+	//  3) cgo's pointer checking requires the unsafe.Pointer(slice..) call to be made
+	//     in the call to the C function.
+	//
+	// Solution:
+	//  Keep track of whether the Value or Key were nil (1), but let the valp and keyp pointers
+	//  point to a 1-byte slice (but the length to send is still 0) so that the dereference (2)
+	//  works.
+	//  Then perform the unsafe.Pointer() on the valp and keyp pointers (which now either point
+	//  to the original msg.Value and msg.Key or to the 1-byte slices) in the call to C (3).
+	//
+	var valp []byte
+	var keyp []byte
+	oneByte := []byte{0}
+	var valIsNull C.int
+	var keyIsNull C.int
+	var valLen int
+	var keyLen int
+
+	if msg.Value == nil {
+		valIsNull = 1
+		valLen = 0
+		valp = oneByte
+	} else {
+		valLen = len(msg.Value)
+		if valLen > 0 {
+			valp = msg.Value
+		} else {
+			valp = oneByte
+		}
+	}
+
+	if msg.Key == nil {
+		keyIsNull = 1
+		keyLen = 0
+		keyp = oneByte
+	} else {
+		keyLen = len(msg.Key)
+		if keyLen > 0 {
+			keyp = msg.Key
+		} else {
+			keyp = oneByte
+		}
+	}
+
+	var cgoid int
+
+	// Per-message state that needs to be retained through the C code:
+	//   delivery channel (if specified)
+	//   message opaque   (if specified)
+	// Since these can't be passed as opaque pointers to the C code,
+	// due to cgo constraints, we add them to a per-producer map for lookup
+	// when the C code triggers the callbacks or events.
+	if deliveryChan != nil || msg.Opaque != nil {
+		cgoid = p.handle.cgoPut(cgoDr{deliveryChan: deliveryChan, opaque: msg.Opaque})
+	}
+
+	var timestamp int64
+	if !msg.Timestamp.IsZero() {
+		timestamp = msg.Timestamp.UnixNano() / 1000000
+	}
+
+	// Convert headers to C-friendly tmphdrs
+	var tmphdrs []C.tmphdr_t
+	tmphdrsCnt := len(msg.Headers)
+
+	if tmphdrsCnt > 0 {
+		tmphdrs = make([]C.tmphdr_t, tmphdrsCnt)
+
+		for n, hdr := range msg.Headers {
+			// Make a copy of the key
+			// to avoid runtime panic with
+			// foreign Go pointers in cgo.
+			tmphdrs[n].key = C.CString(hdr.Key)
+			if hdr.Value != nil {
+				tmphdrs[n].size = C.ssize_t(len(hdr.Value))
+				if tmphdrs[n].size > 0 {
+					// Make a copy of the value
+					// to avoid runtime panic with
+					// foreign Go pointers in cgo.
+					tmphdrs[n].val = C.CBytes(hdr.Value)
+				}
+			} else {
+				// null value
+				tmphdrs[n].size = C.ssize_t(-1)
+			}
+		}
+	} else {
+		// no headers, need a dummy tmphdrs of size 1 to avoid index
+		// out of bounds panic in do_produce() call below.
+		// tmphdrsCnt will be 0.
+		tmphdrs = []C.tmphdr_t{{nil, nil, 0}}
+	}
+
+	cErr := C.do_produce(p.handle.rk, crkt,
+		C.int32_t(msg.TopicPartition.Partition),
+		C.int(msgFlags)|C.RD_KAFKA_MSG_F_COPY,
+		valIsNull, unsafe.Pointer(&valp[0]), C.size_t(valLen),
+		keyIsNull, unsafe.Pointer(&keyp[0]), C.size_t(keyLen),
+		C.int64_t(timestamp),
+		(*C.tmphdr_t)(unsafe.Pointer(&tmphdrs[0])), C.size_t(tmphdrsCnt),
+		(C.uintptr_t)(cgoid))
+	if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+		if cgoid != 0 {
+			p.handle.cgoGet(cgoid)
+		}
+		return newError(cErr)
+	}
+
+	return nil
+}
+
+// Produce single message.
+// This is an asynchronous call that enqueues the message on the internal
+// transmit queue, thus returning immediately.
+// The delivery report will be sent on the provided deliveryChan if specified,
+// or on the Producer object's Events() channel if not.
+// msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented),
+// api.version.request=true, and broker >= 0.10.0.0.
+// msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented),
+// api.version.request=true, and broker >= 0.11.0.0.
+// Returns an error if message could not be enqueued.
+func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error {
+	return p.produce(msg, 0, deliveryChan)
+}
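+
+// Sketch of using a dedicated delivery channel instead of the Events()
+// channel (topic is a placeholder):
+//
+//   deliveryChan := make(chan Event)
+//   p.Produce(&Message{
+//       TopicPartition: TopicPartition{Topic: &topic, Partition: PartitionAny},
+//       Value:          []byte("payload"),
+//   }, deliveryChan)
+//   m := (<-deliveryChan).(*Message) // report for this message only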
+
+// Produce a batch of messages.
+// These batches do not relate to the message batches sent to the broker; the latter
+// are collected on the fly internally in librdkafka.
+// WARNING: This is an experimental API.
+// NOTE: timestamps and headers are not supported with this API.
+func (p *Producer) produceBatch(topic string, msgs []*Message, msgFlags int) error {
+	crkt := p.handle.getRkt(topic)
+
+	cmsgs := make([]C.rd_kafka_message_t, len(msgs))
+	for i, m := range msgs {
+		p.handle.messageToC(m, &cmsgs[i])
+	}
+	r := C.rd_kafka_produce_batch(crkt, C.RD_KAFKA_PARTITION_UA, C.int(msgFlags)|C.RD_KAFKA_MSG_F_FREE,
+		(*C.rd_kafka_message_t)(&cmsgs[0]), C.int(len(msgs)))
+	if r == -1 {
+		return newError(C.rd_kafka_last_error())
+	}
+
+	return nil
+}
+
+// Events returns the Events channel (read)
+func (p *Producer) Events() chan Event {
+	return p.events
+}
+
+// ProduceChannel returns the produce *Message channel (write)
+func (p *Producer) ProduceChannel() chan *Message {
+	return p.produceChannel
+}
+
+// Len returns the number of messages and requests waiting to be transmitted to the broker
+// as well as delivery reports queued for the application.
+// Includes messages on ProduceChannel.
+func (p *Producer) Len() int {
+	return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk))
+}
+
+// Flush and wait for outstanding messages and requests to complete delivery.
+// Includes messages on ProduceChannel.
+// Runs until the outstanding count reaches zero or timeoutMs elapses.
+// Returns the number of outstanding events still un-flushed.
+func (p *Producer) Flush(timeoutMs int) int {
+	termChan := make(chan bool) // unused stand-in termChan
+
+	d, _ := time.ParseDuration(fmt.Sprintf("%dms", timeoutMs))
+	tEnd := time.Now().Add(d)
+	for p.Len() > 0 {
+		remain := tEnd.Sub(time.Now()).Seconds()
+		if remain <= 0.0 {
+			return p.Len()
+		}
+
+		p.handle.eventPoll(p.events,
+			int(math.Min(100, remain*1000)), 1000, termChan)
+	}
+
+	return 0
+}
+
+// Close a Producer instance.
+// The Producer object or its channels are no longer usable after this call.
+func (p *Producer) Close() {
+	// Wait for poller() (signaled by closing pollerTermChan)
+	// and channel_producer() (signaled by closing ProduceChannel)
+	close(p.pollerTermChan)
+	close(p.produceChannel)
+	p.handle.waitTerminated(2)
+
+	close(p.events)
+
+	p.handle.cleanup()
+
+	C.rd_kafka_destroy(p.handle.rk)
+}
+
+// NewProducer creates a new high-level Producer instance.
+//
+// conf is a *ConfigMap with standard librdkafka configuration properties
+// (refer to librdkafka's CONFIGURATION.md for the full list).
+//
+// Supported special configuration properties:
+//   go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
+//                                     These batches do not relate to Kafka message batches in any way.
+//                                     Note: timestamps and headers are not supported with this interface.
+//   go.delivery.reports (bool, true) - Forward per-message delivery reports to the
+//                                      Events() channel.
+//   go.events.channel.size (int, 1000000) - Events() channel size
+//   go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
+//
+func NewProducer(conf *ConfigMap) (*Producer, error) {
+
+	err := versionCheck()
+	if err != nil {
+		return nil, err
+	}
+
+	p := &Producer{}
+
+	// before we do anything with the configuration, create a copy such that
+	// the original is not mutated.
+	confCopy := conf.clone()
+
+	v, err := confCopy.extract("go.batch.producer", false)
+	if err != nil {
+		return nil, err
+	}
+	batchProducer := v.(bool)
+
+	v, err = confCopy.extract("go.delivery.reports", true)
+	if err != nil {
+		return nil, err
+	}
+	p.handle.fwdDr = v.(bool)
+
+	v, err = confCopy.extract("go.events.channel.size", 1000000)
+	if err != nil {
+		return nil, err
+	}
+	eventsChanSize := v.(int)
+
+	v, err = confCopy.extract("go.produce.channel.size", 1000000)
+	if err != nil {
+		return nil, err
+	}
+	produceChannelSize := v.(int)
+
+	if int(C.rd_kafka_version()) < 0x01000000 {
+		// produce.offset.report is no longer used in librdkafka >= v1.0.0
+		v, _ = confCopy.extract("{topic}.produce.offset.report", nil)
+		if v == nil {
+			// Enable offset reporting by default, unless overridden.
+			confCopy.SetKey("{topic}.produce.offset.report", true)
+		}
+	}
+
+	// Convert ConfigMap to librdkafka conf_t
+	cConf, err := confCopy.convert()
+	if err != nil {
+		return nil, err
+	}
+
+	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_DR|C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR)
+
+	// Create librdkafka producer instance
+	p.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
+	if p.handle.rk == nil {
+		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+	}
+
+	p.handle.p = p
+	p.handle.setup()
+	p.handle.rkq = C.rd_kafka_queue_get_main(p.handle.rk)
+	p.events = make(chan Event, eventsChanSize)
+	p.produceChannel = make(chan *Message, produceChannelSize)
+	p.pollerTermChan = make(chan bool)
+
+	go poller(p, p.pollerTermChan)
+
+	// Start either the batch or the non-batch channel producer; exactly one is used.
+	if batchProducer {
+		go channelBatchProducer(p)
+	} else {
+		go channelProducer(p)
+	}
+
+	return p, nil
+}
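+
+// Sketch of passing the special go.* properties alongside standard
+// librdkafka properties (values shown are illustrative):
+//
+//   p, err := NewProducer(&ConfigMap{
+//       "bootstrap.servers":        "mybroker",
+//       "go.batch.producer":        false,
+//       "go.delivery.reports":      true,
+//       "go.produce.channel.size":  500000,
+//   })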
+
+// channelProducer serves the ProduceChannel channel
+func channelProducer(p *Producer) {
+
+	for m := range p.produceChannel {
+		err := p.produce(m, C.RD_KAFKA_MSG_F_BLOCK, nil)
+		if err != nil {
+			m.TopicPartition.Error = err
+			p.events <- m
+		}
+	}
+
+	p.handle.terminatedChan <- "channelProducer"
+}
+
+// channelBatchProducer serves the ProduceChannel channel and attempts to
+// improve cgo performance by using the produceBatch() interface.
+func channelBatchProducer(p *Producer) {
+	var buffered = make(map[string][]*Message)
+	bufferedCnt := 0
+	const batchSize int = 1000000
+	totMsgCnt := 0
+	totBatchCnt := 0
+
+	for m := range p.produceChannel {
+		buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m)
+		bufferedCnt++
+
+	loop2:
+		for {
+			select {
+			case m, ok := <-p.produceChannel:
+				if !ok {
+					break loop2
+				}
+				if m == nil {
+					panic("nil message received on ProduceChannel")
+				}
+				if m.TopicPartition.Topic == nil {
+					panic(fmt.Sprintf("message without Topic received on ProduceChannel: %v", m))
+				}
+				buffered[*m.TopicPartition.Topic] = append(buffered[*m.TopicPartition.Topic], m)
+				bufferedCnt++
+				if bufferedCnt >= batchSize {
+					break loop2
+				}
+			default:
+				break loop2
+			}
+		}
+
+		totBatchCnt++
+		totMsgCnt += bufferedCnt
+
+		for topic, buffered2 := range buffered {
+			err := p.produceBatch(topic, buffered2, C.RD_KAFKA_MSG_F_BLOCK)
+			if err != nil {
+				for _, m = range buffered2 {
+					m.TopicPartition.Error = err
+					p.events <- m
+				}
+			}
+		}
+
+		buffered = make(map[string][]*Message)
+		bufferedCnt = 0
+	}
+	p.handle.terminatedChan <- "channelBatchProducer"
+}
+
+// poller polls the rd_kafka_t handle for events until signalled for termination
+func poller(p *Producer, termChan chan bool) {
+out:
+	for {
+		select {
+		case <-termChan:
+			break out
+
+		default:
+			_, term := p.handle.eventPoll(p.events, 100, 1000, termChan)
+			if term {
+				break out
+			}
+		}
+	}
+
+	p.handle.terminatedChan <- "poller"
+
+}
+
+// GetMetadata queries broker for cluster and topic metadata.
+// If topic is non-nil, only information about that topic is returned; else if
+// allTopics is false, only information about locally used topics is returned;
+// else information about all topics is returned.
+// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
+func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
+	return getMetadata(p, topic, allTopics, timeoutMs)
+}
+
+// QueryWatermarkOffsets returns the broker's low and high offsets for the given topic
+// and partition.
+func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) {
+	return queryWatermarkOffsets(p, topic, partition, timeoutMs)
+}
+
+// OffsetsForTimes looks up offsets by timestamp for the given partitions.
+//
+// The returned offset for each partition is the earliest offset whose
+// timestamp is greater than or equal to the given timestamp in the
+// corresponding partition.
+//
+// The timestamps to query are represented as `.Offset` in the `times`
+// argument and the looked up offsets are represented as `.Offset` in the returned
+// `offsets` list.
+//
+// The function will block for at most timeoutMs milliseconds.
+//
+// Duplicate Topic+Partitions are not supported.
+// Per-partition errors may be returned in the `.Error` field.
+func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) {
+	return offsetsForTimes(p, times, timeoutMs)
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json
new file mode 100644
index 0000000..7024a9c
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json
@@ -0,0 +1,8 @@
+{
+    "Brokers": "mybroker or $BROKERS env",
+    "Topic": "test",
+    "GroupID": "testgroup",
+    "PerfMsgCount": 1000000,
+    "PerfMsgSize": 100,
+    "Config": ["api.version.request=true"]
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go
new file mode 100644
index 0000000..22e8e1a
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go
@@ -0,0 +1,179 @@
+package kafka
+
+/**
+ * Copyright 2016 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+)
+
+/*
+#include <librdkafka/rdkafka.h>
+*/
+import "C"
+
+var testconf struct {
+	Brokers      string
+	Topic        string
+	GroupID      string
+	PerfMsgCount int
+	PerfMsgSize  int
+	Config       []string
+	conf         ConfigMap
+}
+
+// testconfRead reads the test suite config file testconf.json, which must
+// contain at least Brokers and Topic string properties.
+// Returns true if the testconf was found and usable, false if no such file, or panics
+// if the file format is wrong.
+func testconfRead() bool {
+	cf, err := os.Open("testconf.json")
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "%% testconf.json not found - ignoring test\n")
+		return false
+	}
+
+	// Default values
+	testconf.PerfMsgCount = 2000000
+	testconf.PerfMsgSize = 100
+	testconf.GroupID = "testgroup"
+
+	jp := json.NewDecoder(cf)
+	err = jp.Decode(&testconf)
+	if err != nil {
+		panic(fmt.Sprintf("Failed to parse testconf: %s", err))
+	}
+
+	cf.Close()
+
+	if testconf.Brokers[0] == '$' {
+		// Read broker list from environment variable
+		testconf.Brokers = os.Getenv(testconf.Brokers[1:])
+	}
+
+	if testconf.Brokers == "" || testconf.Topic == "" {
+		panic("Missing Brokers or Topic in testconf.json")
+	}
+
+	return true
+}
+
+// update existing ConfigMap with key=value pairs from testconf.Config
+func (cm *ConfigMap) updateFromTestconf() error {
+	if testconf.Config == nil {
+		return nil
+	}
+
+	// Translate "key=value" pairs in Config to ConfigMap
+	for _, s := range testconf.Config {
+		err := cm.Set(s)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+
+}
+
+// Return the number of messages available in all partitions of a topic.
+// WARNING: This uses watermark offsets so it will be incorrect for compacted topics.
+func getMessageCountInTopic(topic string) (int, error) {
+
+	// Create consumer
+	config := &ConfigMap{"bootstrap.servers": testconf.Brokers,
+		"group.id": testconf.GroupID}
+	config.updateFromTestconf()
+
+	c, err := NewConsumer(config)
+	if err != nil {
+		return 0, err
+	}
+
+	// get metadata for the topic to find out number of partitions
+
+	metadata, err := c.GetMetadata(&topic, false, 5*1000)
+	if err != nil {
+		return 0, err
+	}
+
+	t, ok := metadata.Topics[topic]
+	if !ok {
+		return 0, newError(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+	}
+
+	cnt := 0
+	for _, p := range t.Partitions {
+		low, high, err := c.QueryWatermarkOffsets(topic, p.ID, 5*1000)
+		if err != nil {
+			continue
+		}
+		cnt += int(high - low)
+	}
+
+	return cnt, nil
+}
+
+// getBrokerList returns a list of brokers (ids) in the cluster
+func getBrokerList(H Handle) (brokers []int32, err error) {
+	md, err := getMetadata(H, nil, true, 15*1000)
+	if err != nil {
+		return nil, err
+	}
+
+	brokers = make([]int32, len(md.Brokers))
+	for i, mdBroker := range md.Brokers {
+		brokers[i] = mdBroker.ID
+	}
+
+	return brokers, nil
+}
+
+// waitTopicInMetadata waits for the given topic to show up in metadata
+func waitTopicInMetadata(H Handle, topic string, timeoutMs int) error {
+	d := time.Duration(timeoutMs) * time.Millisecond
+	tEnd := time.Now().Add(d)
+
+	for {
+		remain := time.Until(tEnd).Seconds()
+		if remain < 0.0 {
+			return newErrorFromString(ErrTimedOut,
+				fmt.Sprintf("Timed out waiting for topic %s to appear in metadata", topic))
+		}
+
+		md, err := getMetadata(H, nil, true, int(remain*1000))
+		if err != nil {
+			return err
+		}
+
+		for _, t := range md.Topics {
+			if t.Topic != topic {
+				continue
+			}
+			if t.Error.Code() != ErrNoError || len(t.Partitions) < 1 {
+				continue
+			}
+			// Proper topic found in metadata
+			return nil
+		}
+
+		time.Sleep(500 * time.Millisecond)
+	}
+}
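
For illustration, a minimal sketch of how these helpers combine in a package test. The test name is hypothetical; it assumes a local testconf.json as described above:

```go
package kafka

import "testing"

// TestMessageCount is a hypothetical example: skip when testconf.json is
// absent, then count messages in the configured topic via watermark offsets.
func TestMessageCount(t *testing.T) {
	if !testconfRead() {
		t.Skip("testconf.json not found - skipping")
	}
	cnt, err := getMessageCountInTopic(testconf.Topic)
	if err != nil {
		t.Fatalf("getMessageCountInTopic: %v", err)
	}
	t.Logf("topic %s currently holds %d messages", testconf.Topic, cnt)
}
```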
diff --git a/vendor/github.com/coreos/etcd/Documentation/README.md b/vendor/github.com/coreos/etcd/Documentation/README.md
new file mode 120000
index 0000000..8828313
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/Documentation/README.md
@@ -0,0 +1 @@
+docs.md
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
new file mode 100644
index 0000000..001d334
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+package authpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+// User is a single entry in the bucket authUsers
+message User {
+  bytes name = 1;
+  bytes password = 2;
+  repeated string roles = 3;
+}
+
+// Permission is a single entity
+message Permission {
+  enum Type {
+    READ = 0;
+    WRITE = 1;
+    READWRITE = 2;
+  }
+  Type permType = 1;
+
+  bytes key = 2;
+  bytes range_end = 3;
+}
+
+// Role is a single entry in the bucket authRoles
+message Role {
+  bytes name = 1;
+
+  repeated Permission keyPermission = 2;
+}
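
These authpb messages are the storage form of etcd's role-based access control. As a hedged sketch of how such a Role/Permission pair typically comes into being from the client side (the role name, endpoint, and key prefix are placeholders; clientv3 is the etcd client package vendored alongside):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	// Creates a Role entry, then attaches a READ Permission whose key/range_end
	// pair ["foo/", "foo0") covers every key with the prefix "foo/".
	if _, err := cli.RoleAdd(ctx, "reader"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.RoleGrantPermission(ctx, "reader", "foo/", "foo0",
		clientv3.PermissionType(clientv3.PermRead)); err != nil {
		log.Fatal(err)
	}
}
```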
diff --git a/vendor/github.com/coreos/etcd/cmd/etcd b/vendor/github.com/coreos/etcd/cmd/etcd
new file mode 120000
index 0000000..b870225
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/cmd/etcd
@@ -0,0 +1 @@
+../
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/cmd/etcdctl b/vendor/github.com/coreos/etcd/cmd/etcdctl
new file mode 120000
index 0000000..05bb269
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/cmd/etcdctl
@@ -0,0 +1 @@
+../etcdctl
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/cmd/functional b/vendor/github.com/coreos/etcd/cmd/functional
new file mode 120000
index 0000000..44faa31
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/cmd/functional
@@ -0,0 +1 @@
+../functional
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/cmd/tools b/vendor/github.com/coreos/etcd/cmd/tools
new file mode 120000
index 0000000..4887d6e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/cmd/tools
@@ -0,0 +1 @@
+../tools
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
new file mode 100644
index 0000000..f72c6a6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
+package rpctypes
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
new file mode 100644
index 0000000..55eab38
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
@@ -0,0 +1,215 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// server-side error
+var (
+	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
+	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
+	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
+	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
+	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
+	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
+	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
+	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
+	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
+
+	ErrGRPCLeaseNotFound    = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
+	ErrGRPCLeaseExist       = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
+	ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
+
+	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
+	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
+	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
+	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
+	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
+
+	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
+	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
+
+	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
+	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
+	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
+	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
+	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
+	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
+	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
+	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
+	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
+	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
+	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
+	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
+	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
+	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
+
+	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
+	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
+	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
+	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
+	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
+	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
+	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
+	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
+	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
+
+	errStringToError = map[string]error{
+		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
+		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
+		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
+		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
+
+		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
+		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
+		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
+		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
+		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
+
+		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
+		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
+		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
+
+		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
+		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
+		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
+		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
+
+		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
+		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
+
+		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
+		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
+		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
+		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
+		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
+		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
+		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
+		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
+		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
+		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
+		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
+		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
+		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
+
+		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
+		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
+		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
+		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
+		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
+		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
+		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
+		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
+		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
+	}
+)
+
+// client-side error
+var (
+	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
+	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
+	ErrValueProvided = Error(ErrGRPCValueProvided)
+	ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
+	ErrTooManyOps    = Error(ErrGRPCTooManyOps)
+	ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
+	ErrCompacted     = Error(ErrGRPCCompacted)
+	ErrFutureRev     = Error(ErrGRPCFutureRev)
+	ErrNoSpace       = Error(ErrGRPCNoSpace)
+
+	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
+	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
+	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
+
+	ErrMemberExist            = Error(ErrGRPCMemberExist)
+	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
+	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
+	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
+
+	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
+
+	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
+	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
+	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
+	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
+	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
+	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
+	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
+	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
+	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
+	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
+	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
+	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
+	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
+
+	ErrNoLeader                   = Error(ErrGRPCNoLeader)
+	ErrNotLeader                  = Error(ErrGRPCNotLeader)
+	ErrNotCapable                 = Error(ErrGRPCNotCapable)
+	ErrStopped                    = Error(ErrGRPCStopped)
+	ErrTimeout                    = Error(ErrGRPCTimeout)
+	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
+	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
+	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
+	ErrCorrupt                    = Error(ErrGRPCCorrupt)
+)
+
+// EtcdError defines gRPC server errors.
+// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
+type EtcdError struct {
+	code codes.Code
+	desc string
+}
+
+// Code returns grpc/codes.Code.
+// TODO: define clientv3/codes.Code.
+func (e EtcdError) Code() codes.Code {
+	return e.code
+}
+
+func (e EtcdError) Error() string {
+	return e.desc
+}
+
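+// Error converts a server-side gRPC error into its client-side EtcdError
+// equivalent; unrecognized errors are returned unchanged.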
+func Error(err error) error {
+	if err == nil {
+		return nil
+	}
+	verr, ok := errStringToError[ErrorDesc(err)]
+	if !ok { // not gRPC error
+		return err
+	}
+	ev, ok := status.FromError(verr)
+	var desc string
+	if ok {
+		desc = ev.Message()
+	} else {
+		desc = verr.Error()
+	}
+	return EtcdError{code: ev.Code(), desc: desc}
+}
+
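+// ErrorDesc returns the status message of a gRPC error, or err.Error() for a
+// plain error.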
+func ErrorDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
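
The exported client-side values above are plain comparable structs, so callers can test for a specific server condition with ==. A minimal sketch (the endpoint is a placeholder; clientv3 routes server errors through rpctypes.Error internally):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// An empty key is rejected server-side; after conversion the error is
	// directly comparable against the exported rpctypes values.
	if _, err := cli.Put(context.Background(), "", "v"); err == rpctypes.ErrEmptyKey {
		fmt.Println("server rejected the empty key")
	} else if err != nil {
		fmt.Println("other error:", err)
	}
}
```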
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
new file mode 100644
index 0000000..5c590e1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	MetadataRequireLeaderKey = "hasleader"
+	MetadataHasLeader        = "true"
+)
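
These two values are gRPC metadata used by clients that want requests to fail fast on a leaderless member. A sketch of the pattern (this mirrors what clientv3.WithRequireLeader does; the helper name here is ours):

```go
package main

import (
	"context"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/metadata"
)

// withRequireLeader tags the outgoing gRPC context so the server answers with
// ErrNoLeader instead of serving (or blocking on) a member without a leader.
func withRequireLeader(ctx context.Context) context.Context {
	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	return metadata.NewOutgoingContext(ctx, md)
}

func main() {
	ctx := withRequireLeader(context.Background())
	_ = ctx // pass to long-running RPCs such as Watch or LeaseKeepAlive
}
```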
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
new file mode 100644
index 0000000..25e0aca
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
@@ -0,0 +1,34 @@
+syntax = "proto2";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Request {
+	optional uint64 ID         =  1 [(gogoproto.nullable) = false];
+	optional string Method     =  2 [(gogoproto.nullable) = false];
+	optional string Path       =  3 [(gogoproto.nullable) = false];
+	optional string Val        =  4 [(gogoproto.nullable) = false];
+	optional bool   Dir        =  5 [(gogoproto.nullable) = false];
+	optional string PrevValue  =  6 [(gogoproto.nullable) = false];
+	optional uint64 PrevIndex  =  7 [(gogoproto.nullable) = false];
+	optional bool   PrevExist  =  8 [(gogoproto.nullable) = true];
+	optional int64  Expiration =  9 [(gogoproto.nullable) = false];
+	optional bool   Wait       = 10 [(gogoproto.nullable) = false];
+	optional uint64 Since      = 11 [(gogoproto.nullable) = false];
+	optional bool   Recursive  = 12 [(gogoproto.nullable) = false];
+	optional bool   Sorted     = 13 [(gogoproto.nullable) = false];
+	optional bool   Quorum     = 14 [(gogoproto.nullable) = false];
+	optional int64  Time       = 15 [(gogoproto.nullable) = false];
+	optional bool   Stream     = 16 [(gogoproto.nullable) = false];
+	optional bool   Refresh    = 17 [(gogoproto.nullable) = true];
+}
+
+message Metadata {
+	optional uint64 NodeID    = 1 [(gogoproto.nullable) = false];
+	optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
new file mode 100644
index 0000000..25d45d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcdserver.proto";
+import "rpc.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message RequestHeader {
+  uint64 ID = 1;
+  // username is the user name associated with the auth token of the gRPC connection
+  string username = 2;
+  // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+  uint64 auth_revision = 3;
+}
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+message InternalRaftRequest {
+  RequestHeader header = 100;
+  uint64 ID = 1;
+
+  Request v2 = 2;
+
+  RangeRequest range = 3;
+  PutRequest put = 4;
+  DeleteRangeRequest delete_range = 5;
+  TxnRequest txn = 6;
+  CompactionRequest compaction = 7;
+
+  LeaseGrantRequest lease_grant = 8;
+  LeaseRevokeRequest lease_revoke = 9;
+
+  AlarmRequest alarm = 10;
+
+  AuthEnableRequest auth_enable = 1000;
+  AuthDisableRequest auth_disable = 1011;
+
+  InternalAuthenticateRequest authenticate = 1012;
+
+  AuthUserAddRequest auth_user_add = 1100;
+  AuthUserDeleteRequest auth_user_delete = 1101;
+  AuthUserGetRequest auth_user_get = 1102;
+  AuthUserChangePasswordRequest auth_user_change_password = 1103;
+  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
+  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
+  AuthUserListRequest auth_user_list = 1106;
+  AuthRoleListRequest auth_role_list = 1107;
+
+  AuthRoleAddRequest auth_role_add = 1200;
+  AuthRoleDeleteRequest auth_role_delete = 1201;
+  AuthRoleGetRequest auth_role_get = 1202;
+  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
+  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
+}
+
+message EmptyResponse {
+}
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
+message InternalAuthenticateRequest {
+  string name = 1;
+  string password = 2;
+
+  // simple_token is generated in API layer (etcdserver/v3_server.go)
+  string simple_token = 3;
+}
+
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
new file mode 100644
index 0000000..3d3536a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+	"fmt"
+	"strings"
+
+	proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements a custom proto Stringer that redacts
+// passwords and replaces value fields with value_size fields.
+type InternalRaftStringer struct {
+	Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+	switch {
+	case as.Request.LeaseGrant != nil:
+		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseGrant.TTL,
+			as.Request.LeaseGrant.ID,
+		)
+	case as.Request.LeaseRevoke != nil:
+		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseRevoke.ID,
+		)
+	case as.Request.Authenticate != nil:
+		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+			as.Request.Header.String(),
+			as.Request.Authenticate.Name,
+			as.Request.Authenticate.SimpleToken,
+		)
+	case as.Request.AuthUserAdd != nil:
+		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserAdd.Name,
+		)
+	case as.Request.AuthUserChangePassword != nil:
+		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserChangePassword.Name,
+		)
+	case as.Request.Put != nil:
+		return fmt.Sprintf("header:<%s> put:<%s>",
+			as.Request.Header.String(),
+			NewLoggablePutRequest(as.Request.Put).String(),
+		)
+	case as.Request.Txn != nil:
+		return fmt.Sprintf("header:<%s> txn:<%s>",
+			as.Request.Header.String(),
+			NewLoggableTxnRequest(as.Request.Txn).String(),
+		)
+	default:
+		// nothing to redact
+	}
+	return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+	Request *TxnRequest
+}
+
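+// NewLoggableTxnRequest wraps a TxnRequest in a stringer that replaces nested
+// value bytes with value sizes when formatted.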
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+	return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+	var compare []string
+	for _, c := range as.Request.Compare {
+		switch cv := c.TargetUnion.(type) {
+		case *Compare_Value:
+			compare = append(compare, newLoggableValueCompare(c, cv).String())
+		default:
+			// nothing to redact
+			compare = append(compare, c.String())
+		}
+	}
+	var success []string
+	for _, s := range as.Request.Success {
+		success = append(success, newLoggableRequestOp(s).String())
+	}
+	var failure []string
+	for _, f := range as.Request.Failure {
+		failure = append(failure, newLoggableRequestOp(f).String())
+	}
+	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+		strings.Join(compare, " "),
+		strings.Join(success, " "),
+		strings.Join(failure, " "),
+	)
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+	Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+	return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+	switch op := as.Op.Request.(type) {
+	case *RequestOp_RequestPut:
+		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+	case *RequestOp_RequestTxn:
+		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+	default:
+		// nothing to redact
+	}
+	return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
+	ValueSize int                   `protobuf:"bytes,7,opt,name=value_size,proto3"`
+	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+	return &loggableValueCompare{
+		c.Result,
+		c.Target,
+		c.Key,
+		len(cv.Value),
+		c.RangeEnd,
+	}
+}
+
+func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage()    {}
+
+// loggablePutRequest implements a custom proto String to replace value bytes field with a value
+// size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+	ValueSize   int    `protobuf:"varint,2,opt,name=value_size,proto3"`
+	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
+	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
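+// NewLoggablePutRequest wraps a PutRequest in a stringer that logs the value's
+// size rather than its contents.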
+func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+	return &loggablePutRequest{
+		request.Key,
+		len(request.Value),
+		request.Lease,
+		request.PrevKv,
+		request.IgnoreValue,
+		request.IgnoreLease,
+	}
+}
+
+func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage()    {}
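
The shadow-struct trick above preserves proto text encoding while swapping out sensitive or bulky fields. A hypothetical call site (the function name is ours) that a server-side logger might use:

```go
package etcdserverpb

// logRequest formats an internal raft request for logging: passwords are
// redacted and put values appear as value_size (e.g. a 1 MiB value logs as
// value_size:1048576) instead of being dumped verbatim.
func logRequest(req *InternalRaftRequest) string {
	return (&InternalRaftStringer{Request: req}).String()
}
```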
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
new file mode 100644
index 0000000..e80e6e7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
@@ -0,0 +1,1053 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/mvcc/mvccpb/kv.proto";
+import "etcd/auth/authpb/auth.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service KV {
+  // Range gets the keys in the range from the key-value store.
+  rpc Range(RangeRequest) returns (RangeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/range"
+        body: "*"
+    };
+  }
+
+  // Put puts the given key into the key-value store.
+  // A put request increments the revision of the key-value store
+  // and generates one event in the event history.
+  rpc Put(PutRequest) returns (PutResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/put"
+        body: "*"
+    };
+  }
+
+  // DeleteRange deletes the given range from the key-value store.
+  // A delete request increments the revision of the key-value store
+  // and generates a delete event in the event history for every deleted key.
+  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/deleterange"
+        body: "*"
+    };
+  }
+
+  // Txn processes multiple requests in a single transaction.
+  // A txn request increments the revision of the key-value store
+  // and generates events with the same revision for every completed request.
+  // It is not allowed to modify the same key several times within one txn.
+  rpc Txn(TxnRequest) returns (TxnResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/txn"
+        body: "*"
+    };
+  }
+
+  // Compact compacts the event history in the etcd key-value store. The key-value
+  // store should be periodically compacted or the event history will continue to grow
+  // indefinitely.
+  rpc Compact(CompactionRequest) returns (CompactionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/compaction"
+        body: "*"
+    };
+  }
+}
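
The google.api.http options above are what the grpc-gateway uses to expose each RPC over plain HTTP/JSON. A hedged sketch of calling the Range endpoint directly (assumes an etcd v3.3+ member with the gateway listening on 2379; proto bytes fields travel base64-encoded in JSON):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// bytes fields such as RangeRequest.key are base64-encoded in JSON.
	key := base64.StdEncoding.EncodeToString([]byte("foo"))
	body := fmt.Sprintf(`{"key": %q}`, key)

	resp, err := http.Post("http://127.0.0.1:2379/v3beta/kv/range",
		"application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // a JSON-encoded RangeResponse
}
```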
+
+service Watch {
+  // Watch watches for events happening or that have happened. Both input and output
+  // are streams; the input stream is for creating and canceling watchers and the output
+  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+  // for several watches at once. The entire event history can be watched starting from the
+  // last compaction revision.
+  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/watch"
+        body: "*"
+    };
+  }
+}
+
+service Lease {
+  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+  // within a given time to live period. All keys attached to the lease will be expired and
+  // deleted if the lease expires. Each expired key generates a delete event in the event history.
+  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lease/grant"
+        body: "*"
+    };
+  }
+
+  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/revoke"
+        body: "*"
+    };
+  }
+
+  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+  // to the server and streaming keep alive responses from the server to the client.
+  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lease/keepalive"
+        body: "*"
+    };
+  }
+
+  // LeaseTimeToLive retrieves lease information.
+  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/timetolive"
+        body: "*"
+    };
+  }
+
+  // LeaseLeases lists all existing leases.
+  rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/leases"
+        body: "*"
+    };
+  }
+}
+
+service Cluster {
+  // MemberAdd adds a member into the cluster.
+  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/add"
+        body: "*"
+    };
+  }
+
+  // MemberRemove removes an existing member from the cluster.
+  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/remove"
+        body: "*"
+    };
+  }
+
+  // MemberUpdate updates the member configuration.
+  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/update"
+        body: "*"
+    };
+  }
+
+  // MemberList lists all the members in the cluster.
+  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/list"
+        body: "*"
+    };
+  }
+}
+
+service Maintenance {
+  // Alarm activates, deactivates, and queries alarms regarding cluster health.
+  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/alarm"
+        body: "*"
+    };
+  }
+
+  // Status gets the status of the member.
+  rpc Status(StatusRequest) returns (StatusResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/status"
+        body: "*"
+    };
+  }
+
+  // Defragment defragments a member's backend database to recover storage space.
+  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/defragment"
+        body: "*"
+    };
+  }
+
+  // Hash computes the hash of the KV's backend.
+  // This is designed for testing; do not use this in production when there
+  // are ongoing transactions.
+  rpc Hash(HashRequest) returns (HashResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // HashKV computes the hash of all MVCC keys up to a given revision.
+  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/snapshot"
+        body: "*"
+    };
+  }
+
+  // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/transfer-leadership"
+        body: "*"
+    };
+  }
+}
+
+service Auth {
+  // AuthEnable enables authentication.
+  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/enable"
+        body: "*"
+    };
+  }
+
+  // AuthDisable disables authentication.
+  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/disable"
+        body: "*"
+    };
+  }
+
+  // Authenticate processes an authenticate request.
+  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/authenticate"
+        body: "*"
+    };
+  }
+
+  // UserAdd adds a new user.
+  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/add"
+        body: "*"
+    };
+  }
+
+  // UserGet gets detailed user information.
+  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/get"
+        body: "*"
+    };
+  }
+
+  // UserList gets a list of all users.
+  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/list"
+        body: "*"
+    };
+  }
+
+  // UserDelete deletes a specified user.
+  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/delete"
+        body: "*"
+    };
+  }
+
+  // UserChangePassword changes the password of a specified user.
+  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/changepw"
+        body: "*"
+    };
+  }
+
+  // UserGrantRole grants a role to a specified user.
+  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/grant"
+        body: "*"
+    };
+  }
+
+  // UserRevokeRole revokes a role of a specified user.
+  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/revoke"
+        body: "*"
+    };
+  }
+
+  // RoleAdd adds a new role.
+  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/add"
+        body: "*"
+    };
+  }
+
+  // RoleGet gets detailed role information.
+  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/get"
+        body: "*"
+    };
+  }
+
+  // RoleList gets a list of all roles.
+  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/list"
+        body: "*"
+    };
+  }
+
+  // RoleDelete deletes a specified role.
+  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/delete"
+        body: "*"
+    };
+  }
+
+  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/grant"
+        body: "*"
+    };
+  }
+
+  // RoleRevokePermission revokes a key or range permission of a specified role.
+  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/revoke"
+        body: "*"
+    };
+  }
+}
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  // member_id is the ID of the member which sent the response.
+  uint64 member_id = 2;
+  // revision is the key-value store revision when the request was applied.
+  int64 revision = 3;
+  // raft_term is the raft term when the request was applied.
+  uint64 raft_term = 4;
+}
+
+message RangeRequest {
+  enum SortOrder {
+	NONE = 0; // default, no sorting
+	ASCEND = 1; // lowest target value first
+	DESCEND = 2; // highest target value first
+  }
+  enum SortTarget {
+	KEY = 0;
+	VERSION = 1;
+	CREATE = 2;
+	MOD = 3;
+	VALUE = 4;
+  }
+
+  // key is the first key for the range. If range_end is not given, the request only looks up key.
+  bytes key = 1;
+  // range_end is the upper bound on the requested range [key, range_end).
+  // If range_end is '\0', the range is all keys >= key.
+  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then the range request gets all keys prefixed with key.
+  // If both key and range_end are '\0', then the range request returns all keys.
+  bytes range_end = 2;
+  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+  // it is treated as no limit.
+  int64 limit = 3;
+  // revision is the point-in-time of the key-value store to use for the range.
+  // If revision is less or equal to zero, the range is over the newest key-value store.
+  // If the revision has been compacted, ErrCompacted is returned as a response.
+  int64 revision = 4;
+
+  // sort_order is the order for returned sorted results.
+  SortOrder sort_order = 5;
+
+  // sort_target is the key-value field to use for sorting.
+  SortTarget sort_target = 6;
+
+  // serializable sets the range request to use serializable member-local reads.
+  // Range requests are linearizable by default; linearizable requests have higher
+  // latency and lower throughput than serializable requests but reflect the current
+  // consensus of the cluster. For better performance, in exchange for possible stale reads,
+  // a serializable range request is served locally without needing to reach consensus
+  // with other nodes in the cluster.
+  bool serializable = 7;
+
+  // keys_only when set returns only the keys and not the values.
+  bool keys_only = 8;
+
+  // count_only when set returns only the count of the keys in the range.
+  bool count_only = 9;
+
+  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+  // lesser mod revisions will be filtered away.
+  int64 min_mod_revision = 10;
+
+  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+  // greater mod revisions will be filtered away.
+  int64 max_mod_revision = 11;
+
+  // min_create_revision is the lower bound for returned key create revisions; all keys with
+  // lesser create revisions will be filtered away.
+  int64 min_create_revision = 12;
+
+  // max_create_revision is the upper bound for returned key create revisions; all keys with
+  // greater create revisions will be filtered away.
+  int64 max_create_revision = 13;
+}
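
The key/range_end conventions above make prefix queries a matter of computing "key plus one". A sketch of that computation (equivalent in spirit to clientv3's GetPrefixRangeEnd):

```go
package main

import "fmt"

// prefixRangeEnd returns the range_end that pairs with prefix to select every
// key carrying that prefix: increment the last byte below 0xff and truncate;
// a prefix of all 0xff bytes degrades to "\x00" (all keys >= prefix).
func prefixRangeEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("aa")))    // "ab"
	fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff"))) // "b"
}
```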
+
+message RangeResponse {
+  ResponseHeader header = 1;
+  // kvs is the list of key-value pairs matched by the range request.
+  // kvs is empty when count is requested.
+  repeated mvccpb.KeyValue kvs = 2;
+  // more indicates if there are more keys to return in the requested range.
+  bool more = 3;
+  // count is set to the number of keys within the range when requested.
+  int64 count = 4;
+}
+
+message PutRequest {
+  // key is the key, in bytes, to put into the key-value store.
+  bytes key = 1;
+  // value is the value, in bytes, to associate with the key in the key-value store.
+  bytes value = 2;
+  // lease is the lease ID to associate with the key in the key-value store. A lease
+  // value of 0 indicates no lease.
+  int64 lease = 3;
+
+  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+  // The previous key-value pair will be returned in the put response.
+  bool prev_kv = 4;
+
+  // If ignore_value is set, etcd updates the key using its current value.
+  // Returns an error if the key does not exist.
+  bool ignore_value = 5;
+
+  // If ignore_lease is set, etcd updates the key using its current lease.
+  // Returns an error if the key does not exist.
+  bool ignore_lease = 6;
+}
+
+message PutResponse {
+  ResponseHeader header = 1;
+  // if prev_kv is set in the request, the previous key-value pair will be returned.
+  mvccpb.KeyValue prev_kv = 2;
+}
+
+message DeleteRangeRequest {
+  // key is the first key to delete in the range.
+  bytes key = 1;
+  // range_end is the key following the last key to delete for the range [key, range_end).
+  // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then the range is all the keys with the prefix (the given key).
+  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+  bytes range_end = 2;
+
+  // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+  // The previous key-value pairs will be returned in the delete response.
+  bool prev_kv = 3;
+}
+
+message DeleteRangeResponse {
+  ResponseHeader header = 1;
+  // deleted is the number of keys deleted by the delete range request.
+  int64 deleted = 2;
+  // if prev_kv is set in the request, the previous key-value pairs will be returned.
+  repeated mvccpb.KeyValue prev_kvs = 3;
+}
+
+message RequestOp {
+  // request is a union of request types accepted by a transaction.
+  oneof request {
+    RangeRequest request_range = 1;
+    PutRequest request_put = 2;
+    DeleteRangeRequest request_delete_range = 3;
+    TxnRequest request_txn = 4;
+  }
+}
+
+message ResponseOp {
+  // response is a union of response types returned by a transaction.
+  oneof response {
+    RangeResponse response_range = 1;
+    PutResponse response_put = 2;
+    DeleteRangeResponse response_delete_range = 3;
+    TxnResponse response_txn = 4;
+  }
+}
+
+message Compare {
+  enum CompareResult {
+    EQUAL = 0;
+    GREATER = 1;
+    LESS = 2;
+    NOT_EQUAL = 3;
+  }
+  enum CompareTarget {
+    VERSION = 0;
+    CREATE = 1;
+    MOD = 2;
+    VALUE = 3;
+    LEASE = 4;
+  }
+  // result is the logical comparison operation for this comparison.
+  CompareResult result = 1;
+  // target is the key-value field to inspect for the comparison.
+  CompareTarget target = 2;
+  // key is the subject key for the comparison operation.
+  bytes key = 3;
+  oneof target_union {
+    // version is the version of the given key
+    int64 version = 4;
+    // create_revision is the creation revision of the given key
+    int64 create_revision = 5;
+    // mod_revision is the last modified revision of the given key.
+    int64 mod_revision = 6;
+    // value is the value of the given key, in bytes.
+    bytes value = 7;
+    // lease is the lease id of the given key.
+    int64 lease = 8;
+    // leave room for more target_union field tags, jump to 64
+  }
+
+  // range_end compares the given target to all keys in the range [key, range_end).
+  // See RangeRequest for more details on key ranges.
+  bytes range_end = 64;
+  // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From the Google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t_op (see item 2 below), otherwise
+// it executes f_op (see item 3 below).
+// 2. A list of database operations called t_op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard
+// evaluates to true.
+// 3. A list of database operations called f_op. Like t_op, but executed if guard evaluates to false.
+message TxnRequest {
+  // compare is a list of predicates representing a conjunction of terms.
+  // If the comparisons succeed, then the success requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  // If the comparisons fail, then the failure requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  repeated Compare compare = 1;
+  // success is a list of requests which will be applied when compare evaluates to true.
+  repeated RequestOp success = 2;
+  // failure is a list of requests which will be applied when compare evaluates to false.
+  repeated RequestOp failure = 3;
+}
+
+message TxnResponse {
+  ResponseHeader header = 1;
+  // succeeded is set to true if the compare evaluated to true, and false otherwise.
+  bool succeeded = 2;
+  // responses is a list of responses corresponding to the results from applying
+  // success if succeeded is true or failure if succeeded is false.
+  repeated ResponseOp responses = 3;
+}
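
A hedged sketch of how the compare/success/failure triple maps onto the client's transaction builder; the key name and values are illustrative, and the same vendored `clientv3` is assumed.

```go
package example

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// compareAndSwap mirrors TxnRequest above: the If clause is the compare
// list, Then holds the success ops, Else holds the failure ops.
func compareAndSwap(ctx context.Context, cli *clientv3.Client) error {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value("balance"), "=", "100")).
		Then(clientv3.OpPut("balance", "90")).
		Else(clientv3.OpGet("balance")).
		Commit()
	if err != nil {
		return err
	}
	// resp.Succeeded corresponds to TxnResponse.succeeded.
	fmt.Println("guard held:", resp.Succeeded)
	return nil
}
```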
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+  // revision is the key-value store revision for the compaction operation.
+  int64 revision = 1;
+  // physical is set so the RPC will wait until the compaction is physically
+  // applied to the local database such that compacted entries are totally
+  // removed from the backend database.
+  bool physical = 2;
+}
+
+message CompactionResponse {
+  ResponseHeader header = 1;
+}
+
+message HashRequest {
+}
+
+message HashKVRequest {
+  // revision is the key-value store revision for the hash operation.
+  int64 revision = 1;
+}
+
+message HashKVResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+  uint32 hash = 2;
+  // compact_revision is the compacted revision of the key-value store when the hash begins.
+  int64 compact_revision = 3;
+}
+
+message HashResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's KV's backend.
+  uint32 hash = 2;
+}
+
+message SnapshotRequest {
+}
+
+message SnapshotResponse {
+  // header has the current key-value store information. The first header in the snapshot
+  // stream indicates the point in time of the snapshot.
+  ResponseHeader header = 1;
+
+  // remaining_bytes is the number of blob bytes to be sent after this message
+  uint64 remaining_bytes = 2;
+
+  // blob contains the next chunk of the snapshot in the snapshot stream.
+  bytes blob = 3;
+}
+
+message WatchRequest {
+  // request_union is a request to either create a new watcher or cancel an existing watcher.
+  oneof request_union {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+  }
+}
+
+message WatchCreateRequest {
+  // key is the key to register for watching.
+  bytes key = 1;
+  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+  // or equal to the key argument are watched.
+  // If range_end is the given key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then all keys prefixed with the given key will be watched.
+  bytes range_end = 2;
+  // start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+  int64 start_revision = 3;
+  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+  // no events to the new watcher if there are no recent events. It is useful when clients
+  // wish to recover a disconnected watcher starting from a recent known revision.
+  // The etcd server may decide how often it will send notifications based on current load.
+  bool progress_notify = 4;
+
+  enum FilterType {
+    // filter out put event.
+    NOPUT = 0;
+    // filter out delete event.
+    NODELETE = 1;
+  }
+  // filters filter the events on the server side before they are sent back to the watcher.
+  repeated FilterType filters = 5;
+
+  // If prev_kv is set, created watcher gets the previous KV before the event happens.
+  // If the previous KV is already compacted, nothing will be returned.
+  bool prev_kv = 6;
+}
+
+message WatchCancelRequest {
+  // watch_id is the watcher id to cancel so that no more events are transmitted.
+  int64 watch_id = 1;
+}
+
+message WatchResponse {
+  ResponseHeader header = 1;
+  // watch_id is the ID of the watcher that corresponds to the response.
+  int64 watch_id = 2;
+  // created is set to true if the response is for a create watch request.
+  // The client should record the watch_id and expect to receive events for
+  // the created watcher from the same stream.
+  // All events sent to the created watcher will attach with the same watch_id.
+  bool created = 3;
+  // canceled is set to true if the response is for a cancel watch request.
+  // No further events will be sent to the canceled watcher.
+  bool canceled = 4;
+  // compact_revision is set to the minimum index if a watcher tries to watch
+  // at a compacted index.
+  //
+  // This happens when creating a watcher at a compacted revision or the watcher cannot
+  // catch up with the progress of the key-value store.
+  //
+  // The client should treat the watcher as canceled and should not try to create any
+  // watcher with the same start_revision again.
+  int64 compact_revision  = 5;
+
+  // cancel_reason indicates the reason for canceling the watcher.
+  string cancel_reason = 6;
+
+  repeated mvccpb.Event events = 11;
+}
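
A sketch of the corresponding client-side watch loop; the prefix, start revision, and key names are assumptions, and the `CompactRevision` handling follows the comment above.

```go
package example

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// watchPrefix creates a watcher over "jobs/" from revision rev, asking
// the server to attach the previous key-value pair to each event.
func watchPrefix(ctx context.Context, cli *clientv3.Client, rev int64) {
	wch := cli.Watch(ctx, "jobs/",
		clientv3.WithPrefix(),
		clientv3.WithRev(rev),
		clientv3.WithPrevKV())
	for wresp := range wch {
		if wresp.CompactRevision != 0 {
			// The start revision was compacted; treat the watcher as
			// canceled and restart no earlier than this revision.
			fmt.Println("compacted; minimum revision:", wresp.CompactRevision)
			return
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %s -> %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```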
+
+message LeaseGrantRequest {
+  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+  int64 TTL = 1;
+  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+  int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID for the granted lease.
+  int64 ID = 2;
+  // TTL is the server chosen lease time-to-live in seconds.
+  int64 TTL = 3;
+  string error = 4;
+}
+
+message LeaseRevokeRequest {
+  // ID is the lease ID to revoke. When the lease is revoked, all associated keys will be deleted.
+  int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+  ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+  // ID is the lease ID for the lease to keep alive.
+  int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the new time-to-live for the lease.
+  int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+  // ID is the lease ID for the lease.
+  int64 ID = 1;
+  // keys is true to query all the keys attached to this lease.
+  bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the time-to-live request.
+  int64 ID = 2;
+  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+  int64 TTL = 3;
+  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+  int64 grantedTTL = 4;
+  // Keys is the list of keys attached to this lease.
+  repeated bytes keys = 5;
+}
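
A sketch of the grant/attach/keep-alive cycle these messages describe; the key, value, and TTL are placeholders.

```go
package example

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// registerWithLease grants a 10-second lease, attaches a key to it, and
// keeps the lease alive so the key lives as long as this process does.
func registerWithLease(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 10) // TTL in seconds
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "service/instance", "10.0.0.5:8080",
		clientv3.WithLease(grant.ID)); err != nil {
		return err
	}
	// KeepAlive streams LeaseKeepAliveRequests in the background.
	kaCh, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	for ka := range kaCh {
		fmt.Println("lease refreshed, TTL now:", ka.TTL)
	}
	return nil // channel closed: context done or lease expired/revoked
}
```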
+
+message LeaseLeasesRequest {
+}
+
+message LeaseStatus {
+  int64 ID = 1;
+  // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+  ResponseHeader header = 1;
+  repeated LeaseStatus leases = 2;
+}
+
+message Member {
+  // ID is the member ID for this member.
+  uint64 ID = 1;
+  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+  string name = 2;
+  // peerURLs is the list of URLs the member exposes to the cluster for communication.
+  repeated string peerURLs = 3;
+  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+  repeated string clientURLs = 4;
+}
+
+message MemberAddRequest {
+  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+  repeated string peerURLs = 1;
+}
+
+message MemberAddResponse {
+  ResponseHeader header = 1;
+  // member is the member information for the added member.
+  Member member = 2;
+  // members is a list of all members after adding the new member.
+  repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+  // ID is the member ID of the member to remove.
+  uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after removing the member.
+  repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+  // ID is the member ID of the member to update.
+  uint64 ID = 1;
+  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+  repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after updating the member.
+  repeated Member members = 2;
+}
+
+message MemberListRequest {
+}
+
+message MemberListResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members associated with the cluster.
+  repeated Member members = 2;
+}
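
For completeness, a small sketch of reading these member fields through the client's cluster API (same vendored `clientv3` assumption as above).

```go
package example

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// printMembers lists every cluster member with the fields documented in
// the Member message above.
func printMembers(ctx context.Context, cli *clientv3.Client) error {
	resp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x name=%q peers=%v clients=%v\n",
			m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}
	return nil
}
```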
+
+message DefragmentRequest {
+}
+
+message DefragmentResponse {
+  ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+  // targetID is the node ID for the new leader.
+  uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+  ResponseHeader header = 1;
+}
+
+enum AlarmType {
+  NONE = 0; // default, used to query if any alarm is active
+  NOSPACE = 1; // space quota is exhausted
+  CORRUPT = 2; // kv store corruption detected
+}
+
+message AlarmRequest {
+  enum AlarmAction {
+    GET = 0;
+    ACTIVATE = 1;
+    DEACTIVATE = 2;
+  }
+  // action is the kind of alarm request to issue. The action
+  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+  // raised alarm.
+  AlarmAction action = 1;
+  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+  // alarm request covers all members.
+  uint64 memberID = 2;
+  // alarm is the type of alarm to consider for this request.
+  AlarmType alarm = 3;
+}
+
+message AlarmMember {
+  // memberID is the ID of the member associated with the raised alarm.
+  uint64 memberID = 1;
+  // alarm is the type of alarm which has been raised.
+  AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+  ResponseHeader header = 1;
+  // alarms is a list of alarms associated with the alarm request.
+  repeated AlarmMember alarms = 2;
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+  ResponseHeader header = 1;
+  // version is the cluster protocol version used by the responding member.
+  string version = 2;
+  // dbSize is the size of the backend database, in bytes, of the responding member.
+  int64 dbSize = 3;
+  // leader is the member ID which the responding member believes is the current leader.
+  uint64 leader = 4;
+  // raftIndex is the current raft index of the responding member.
+  uint64 raftIndex = 5;
+  // raftTerm is the current raft term of the responding member.
+  uint64 raftTerm = 6;
+}
+
+message AuthEnableRequest {
+}
+
+message AuthDisableRequest {
+}
+
+message AuthenticateRequest {
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserAddRequest {
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserGetRequest {
+  string name = 1;
+}
+
+message AuthUserDeleteRequest {
+  // name is the name of the user to delete.
+  string name = 1;
+}
+
+message AuthUserChangePasswordRequest {
+  // name is the name of the user whose password is being changed.
+  string name = 1;
+  // password is the new password for the user.
+  string password = 2;
+}
+
+message AuthUserGrantRoleRequest {
+  // user is the name of the user which should be granted a given role.
+  string user = 1;
+  // role is the name of the role to grant to the user.
+  string role = 2;
+}
+
+message AuthUserRevokeRoleRequest {
+  string name = 1;
+  string role = 2;
+}
+
+message AuthRoleAddRequest {
+  // name is the name of the role to add to the authentication system.
+  string name = 1;
+}
+
+message AuthRoleGetRequest {
+  string role = 1;
+}
+
+message AuthUserListRequest {
+}
+
+message AuthRoleListRequest {
+}
+
+message AuthRoleDeleteRequest {
+  string role = 1;
+}
+
+message AuthRoleGrantPermissionRequest {
+  // name is the name of the role which will be granted the permission.
+  string name = 1;
+  // perm is the permission to grant to the role.
+  authpb.Permission perm = 2;
+}
+
+message AuthRoleRevokePermissionRequest {
+  string role = 1;
+  string key = 2;
+  string range_end = 3;
+}
+
+message AuthEnableResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthDisableResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthenticateResponse {
+  ResponseHeader header = 1;
+  // token is an authorized token that can be used in succeeding RPCs.
+  string token = 2;
+}
+
+message AuthUserAddResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserGetResponse {
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserDeleteResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserChangePasswordResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserGrantRoleResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserRevokeRoleResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleAddResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGetResponse {
+  ResponseHeader header = 1;
+
+  repeated authpb.Permission perm = 2;
+}
+
+message AuthRoleListResponse {
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserListResponse {
+  ResponseHeader header = 1;
+
+  repeated string users = 2;
+}
+
+message AuthRoleDeleteResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGrantPermissionResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleRevokePermissionResponse {
+  ResponseHeader header = 1;
+}
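
A sketch of the typical order in which these auth RPCs are invoked from the client; the user name, role name, and password are placeholders, and etcd requires a root user holding the root role before auth can be enabled.

```go
package example

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// bootstrapAuth creates a root user, grants it the root role, and then
// switches authentication on, matching the request messages above.
func bootstrapAuth(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.UserAdd(ctx, "root", "changeme"); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}
	_, err := cli.AuthEnable(ctx)
	return err
}
```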
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
new file mode 100644
index 0000000..23c911b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+package mvccpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message KeyValue {
+  // key is the key in bytes. An empty key is not allowed.
+  bytes key = 1;
+  // create_revision is the revision of last creation on this key.
+  int64 create_revision = 2;
+  // mod_revision is the revision of last modification on this key.
+  int64 mod_revision = 3;
+  // version is the version of the key. A deletion resets
+  // the version to zero and any modification of the key
+  // increases its version.
+  int64 version = 4;
+  // value is the value held by the key, in bytes.
+  bytes value = 5;
+  // lease is the ID of the lease attached to the key.
+  // When the attached lease expires, the key will be deleted.
+  // If lease is 0, then no lease is attached to the key.
+  int64 lease = 6;
+}
+
+message Event {
+  enum EventType {
+    PUT = 0;
+    DELETE = 1;
+  }
+  // type is the kind of event. If type is a PUT, it indicates
+  // new data has been stored to the key. If type is a DELETE,
+  // it indicates the key was deleted.
+  EventType type = 1;
+  // kv holds the KeyValue for the event.
+  // A PUT event contains current kv pair.
+  // A PUT event with kv.Version=1 indicates the creation of a key.
+  // A DELETE/EXPIRE event contains the deleted key with
+  // its modification revision set to the revision of deletion.
+  KeyValue kv = 2;
+
+  // prev_kv holds the key-value pair before the event happens.
+  KeyValue prev_kv = 3;
+}
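
The comments on Event encode a small classification rule; a sketch of it in Go, using the generated `mvccpb` package that accompanies this proto file.

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

// describe applies the rules above: a PUT with Version == 1 is a key
// creation, any other PUT is an update, and a DELETE carries only the
// deleted key with its mod_revision set to the deletion revision.
func describe(ev *mvccpb.Event) string {
	switch {
	case ev.Type == mvccpb.PUT && ev.Kv.Version == 1:
		return fmt.Sprintf("created %s", ev.Kv.Key)
	case ev.Type == mvccpb.PUT:
		return fmt.Sprintf("updated %s (version %d)", ev.Kv.Key, ev.Kv.Version)
	default:
		return fmt.Sprintf("deleted %s at revision %d", ev.Kv.Key, ev.Kv.ModRevision)
	}
}
```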
diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go
new file mode 100644
index 0000000..de8ef0b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types declares various data types and implements type-checking
+// functions.
+package types
diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go
new file mode 100644
index 0000000..1b042d9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/id.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"strconv"
+)
+
+// ID represents a generic identifier which is canonically
+// stored as a uint64 but is typically represented as a
+// base-16 string for input/output.
+type ID uint64
+
+func (i ID) String() string {
+	return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString attempts to create an ID from a base-16 string.
+func IDFromString(s string) (ID, error) {
+	i, err := strconv.ParseUint(s, 16, 64)
+	return ID(i), err
+}
+
+// IDSlice implements the sort interface
+type IDSlice []ID
+
+func (p IDSlice) Len() int           { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
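
A short usage sketch of the round trip this type supports; the ID value is arbitrary.

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func idRoundTrip() {
	id := types.ID(0x8e9e05c52164694d)
	s := id.String() // base-16, no "0x" prefix: "8e9e05c52164694d"
	back, err := types.IDFromString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(id == back) // true
}
```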
diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go
new file mode 100644
index 0000000..c111b0c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/set.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+type Set interface {
+	Add(string)
+	Remove(string)
+	Contains(string) bool
+	Equals(Set) bool
+	Length() int
+	Values() []string
+	Copy() Set
+	Sub(Set) Set
+}
+
+func NewUnsafeSet(values ...string) *unsafeSet {
+	set := &unsafeSet{make(map[string]struct{})}
+	for _, v := range values {
+		set.Add(v)
+	}
+	return set
+}
+
+func NewThreadsafeSet(values ...string) *tsafeSet {
+	us := NewUnsafeSet(values...)
+	return &tsafeSet{us, sync.RWMutex{}}
+}
+
+type unsafeSet struct {
+	d map[string]struct{}
+}
+
+// Add adds a new value to the set (no-op if the value is already present)
+func (us *unsafeSet) Add(value string) {
+	us.d[value] = struct{}{}
+}
+
+// Remove removes the given value from the set
+func (us *unsafeSet) Remove(value string) {
+	delete(us.d, value)
+}
+
+// Contains returns whether the set contains the given value
+func (us *unsafeSet) Contains(value string) (exists bool) {
+	_, exists = us.d[value]
+	return exists
+}
+
+// ContainsAll returns whether the set contains all given values
+func (us *unsafeSet) ContainsAll(values []string) bool {
+	for _, s := range values {
+		if !us.Contains(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equals returns whether the contents of two sets are identical
+func (us *unsafeSet) Equals(other Set) bool {
+	v1 := sort.StringSlice(us.Values())
+	v2 := sort.StringSlice(other.Values())
+	v1.Sort()
+	v2.Sort()
+	return reflect.DeepEqual(v1, v2)
+}
+
+// Length returns the number of elements in the set
+func (us *unsafeSet) Length() int {
+	return len(us.d)
+}
+
+// Values returns the values of the Set in an unspecified order.
+func (us *unsafeSet) Values() (values []string) {
+	values = make([]string, 0)
+	for val := range us.d {
+		values = append(values, val)
+	}
+	return values
+}
+
+// Copy creates a new Set containing the same values as the original.
+func (us *unsafeSet) Copy() Set {
+	cp := NewUnsafeSet()
+	for val := range us.d {
+		cp.Add(val)
+	}
+
+	return cp
+}
+
+// Sub returns a copy of the set with all elements in other removed; the receiver is unchanged.
+func (us *unsafeSet) Sub(other Set) Set {
+	oValues := other.Values()
+	result := us.Copy().(*unsafeSet)
+
+	for _, val := range oValues {
+		if _, ok := result.d[val]; !ok {
+			continue
+		}
+		delete(result.d, val)
+	}
+
+	return result
+}
+
+type tsafeSet struct {
+	us *unsafeSet
+	m  sync.RWMutex
+}
+
+func (ts *tsafeSet) Add(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Add(value)
+}
+
+func (ts *tsafeSet) Remove(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Remove(value)
+}
+
+func (ts *tsafeSet) Contains(value string) (exists bool) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Contains(value)
+}
+
+func (ts *tsafeSet) Equals(other Set) bool {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Equals(other)
+}
+
+func (ts *tsafeSet) Length() int {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Length()
+}
+
+func (ts *tsafeSet) Values() (values []string) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Values()
+}
+
+func (ts *tsafeSet) Copy() Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Copy().(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
+
+func (ts *tsafeSet) Sub(other Set) Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Sub(other).(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
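
A usage sketch of the two constructors; member names are illustrative. Note that Sub returns a new set rather than mutating the receiver.

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func setExample() {
	peers := types.NewThreadsafeSet("infra0", "infra1", "infra2")
	gone := types.NewUnsafeSet("infra2")

	peers.Add("infra3")
	fmt.Println(peers.Contains("infra1")) // true

	// Sub returns a new Set; peers itself is unchanged.
	remaining := peers.Sub(gone)
	fmt.Println(remaining.Length()) // 3
	fmt.Println(peers.Length())     // 4
}
```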
diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go
new file mode 100644
index 0000000..0dd9ca7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// Uint64Slice implements the sort interface.
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int           { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go
new file mode 100644
index 0000000..9e5d03f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"sort"
+	"strings"
+)
+
+type URLs []url.URL
+
+func NewURLs(strs []string) (URLs, error) {
+	all := make([]url.URL, len(strs))
+	if len(all) == 0 {
+		return nil, errors.New("no valid URLs given")
+	}
+	for i, in := range strs {
+		in = strings.TrimSpace(in)
+		u, err := url.Parse(in)
+		if err != nil {
+			return nil, err
+		}
+		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
+			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+		}
+		if _, _, err := net.SplitHostPort(u.Host); err != nil {
+			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+		}
+		if u.Path != "" {
+			return nil, fmt.Errorf("URL must not contain a path: %s", in)
+		}
+		all[i] = *u
+	}
+	us := URLs(all)
+	us.Sort()
+
+	return us, nil
+}
+
+func MustNewURLs(strs []string) URLs {
+	urls, err := NewURLs(strs)
+	if err != nil {
+		panic(err)
+	}
+	return urls
+}
+
+func (us URLs) String() string {
+	return strings.Join(us.StringSlice(), ",")
+}
+
+func (us *URLs) Sort() {
+	sort.Sort(us)
+}
+func (us URLs) Len() int           { return len(us) }
+func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
+func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }
+
+func (us URLs) StringSlice() []string {
+	out := make([]string, len(us))
+	for i := range us {
+		out[i] = us[i].String()
+	}
+
+	return out
+}
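
A sketch of the validation rules NewURLs enforces; the addresses are placeholders.

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func urlsExample() {
	// Each entry must be scheme://host:port with no path; input is
	// trimmed, validated, and the result is sorted.
	us, err := types.NewURLs([]string{
		"http://10.0.0.2:2380",
		" http://10.0.0.1:2380 ",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(us.String()) // http://10.0.0.1:2380,http://10.0.0.2:2380

	// A path makes the URL invalid:
	if _, err := types.NewURLs([]string{"http://10.0.0.3:2380/v2"}); err != nil {
		fmt.Println(err) // URL must not contain a path: ...
	}
}
```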
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
new file mode 100644
index 0000000..47690cc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// URLsMap is a map from a name to its URLs.
+type URLsMap map[string]URLs
+
+// NewURLsMap returns a URLsMap instantiated from the given string,
+// which consists of discovery-formatted names-to-URLs, like:
+// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
+func NewURLsMap(s string) (URLsMap, error) {
+	m := parse(s)
+
+	cl := URLsMap{}
+	for name, urls := range m {
+		us, err := NewURLs(urls)
+		if err != nil {
+			return nil, err
+		}
+		cl[name] = us
+	}
+	return cl, nil
+}
+
+// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
+// string values in the map can be multiple values separated by the sep string.
+func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
+	var err error
+	um := URLsMap{}
+	for k, v := range m {
+		um[k], err = NewURLs(strings.Split(v, sep))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return um, nil
+}
+
+// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
+func (c URLsMap) String() string {
+	var pairs []string
+	for name, urls := range c {
+		for _, url := range urls {
+			pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
+		}
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+// URLs returns a list of all URLs.
+// The returned list is sorted in ascending lexicographical order.
+func (c URLsMap) URLs() []string {
+	var urls []string
+	for _, us := range c {
+		for _, u := range us {
+			urls = append(urls, u.String())
+		}
+	}
+	sort.Strings(urls)
+	return urls
+}
+
+// Len returns the size of URLsMap.
+func (c URLsMap) Len() int {
+	return len(c)
+}
+
+// parse parses the given string and returns a map listing the values specified for each key.
+func parse(s string) map[string][]string {
+	m := make(map[string][]string)
+	for s != "" {
+		key := s
+		if i := strings.IndexAny(key, ","); i >= 0 {
+			key, s = key[:i], key[i+1:]
+		} else {
+			s = ""
+		}
+		if key == "" {
+			continue
+		}
+		value := ""
+		if i := strings.Index(key, "="); i >= 0 {
+			key, value = key[:i], key[i+1:]
+		}
+		m[key] = append(m[key], value)
+	}
+	return m
+}
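
A sketch of parsing the discovery-formatted string documented on NewURLsMap; names and addresses are placeholders, and a repeated name accumulates URLs.

```go
package example

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func urlsMapExample() {
	m, err := types.NewURLsMap(
		"infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380,infra1=http://10.0.0.3:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len())           // 2 (infra1 carries two URLs)
	fmt.Println(m["infra1"].Len()) // 2
	fmt.Println(m.String())        // sorted back into the same format
}
```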
diff --git a/vendor/github.com/coreswitch/log/.gitignore b/vendor/github.com/coreswitch/log/.gitignore
new file mode 100644
index 0000000..a1338d6
--- /dev/null
+++ b/vendor/github.com/coreswitch/log/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
diff --git a/vendor/github.com/coreswitch/log/LICENSE b/vendor/github.com/coreswitch/log/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/github.com/coreswitch/log/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreswitch/log/README.md b/vendor/github.com/coreswitch/log/README.md
new file mode 100644
index 0000000..8d09980
--- /dev/null
+++ b/vendor/github.com/coreswitch/log/README.md
@@ -0,0 +1,57 @@
+# log
+
+log is a wrapper for the [logrus](https://github.com/sirupsen/logrus/) Go logging package. To aid debugging, log adds
+source code information and the function name to the logrus output.
+
+#### Example
+
+A simple example that enables the debug log level and sets the output to stdout.
+
+```go
+package main
+
+import (
+  log "github.com/coreswitch/log"
+)
+
+func main() {
+  log.SetLevel("debug")
+  log.SetOutput("stdout")
+
+  log.With("animal", "walrus").Info("A walrus appears")
+}
+```
+
+This example sets the formatter to JSON.
+
+```go
+package main
+
+import (
+  log "github.com/coreswitch/log"
+)
+
+func main() {
+  log.SetLevel("debug")
+  log.SetJSONFormatter()
+
+  log.With("animal", "zebra").Debug("A zebra appears")
+}
+```
+
+The source field and the function field are configurable.
+
+```go
+package main
+
+import (
+  log "github.com/coreswitch/log"
+)
+
+func main() {
+  log.SourceField = false
+  log.FuncField = false
+
+  log.With("animal", "bird").Warn("A crow appears")
+}
+```
diff --git a/vendor/github.com/coreswitch/log/log.go b/vendor/github.com/coreswitch/log/log.go
new file mode 100644
index 0000000..57b1165
--- /dev/null
+++ b/vendor/github.com/coreswitch/log/log.go
@@ -0,0 +1,183 @@
+// Copyright 2017 CoreSwitch
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Output source file information "filename:line number" to the log.
+var SourceField bool = true
+
+// Output function name to the log.
+var FuncField bool = true
+
+// SetLevel sets the logging level from a level name such as "debug" or "info".
+func SetLevel(level string) error {
+	l, err := logrus.ParseLevel(level)
+	if err != nil {
+		return err
+	}
+	origLogger.SetLevel(l)
+	return nil
+}
+
+// SetOutput directs log output to stdout or stderr.
+func SetOutput(output string) error {
+	switch output {
+	case "stdout":
+		origLogger.Out = os.Stdout
+	case "stderr":
+		origLogger.Out = os.Stderr
+	default:
+		return fmt.Errorf("output format error")
+	}
+	return nil
+}
+
+// SetJSONFormatter switches the log output format to JSON.
+func SetJSONFormatter() {
+	origLogger.Formatter = &logrus.JSONFormatter{}
+}
+
+// SetTextFormatter switches the log output format to plain text without timestamps.
+func SetTextFormatter() {
+	origLogger.Formatter = &logrus.TextFormatter{
+		DisableTimestamp: true,
+	}
+}
+
+type Logger interface {
+	Debug(...interface{})
+	Debugf(string, ...interface{})
+	Info(...interface{})
+	Infof(string, ...interface{})
+	Warn(...interface{})
+	Warnf(string, ...interface{})
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	With(key string, value interface{}) Logger
+}
+
+type logger struct {
+	entry *logrus.Entry
+}
+
+var origLogger = logrus.New()
+var baseLogger = logger{entry: logrus.NewEntry(origLogger)}
+
+func (l logger) sourced() *logrus.Entry {
+	e := l.entry
+
+	if !SourceField && !FuncField {
+		return e
+	}
+
+	pc, file, line, ok := runtime.Caller(2)
+	if !ok {
+		return e
+	}
+
+	if SourceField {
+		slash := strings.LastIndex(file, "/")
+		file = file[slash+1:]
+		e = e.WithField("source", fmt.Sprintf("%s:%d", file, line))
+	}
+
+	if FuncField {
+		if f := runtime.FuncForPC(pc); f != nil {
+			e = e.WithField("func", f.Name())
+		}
+	}
+
+	return e
+}
+
+func (l logger) Debug(args ...interface{}) {
+	l.sourced().Debug(args...)
+}
+
+func (l logger) Debugf(format string, args ...interface{}) {
+	l.sourced().Debugf(format, args...)
+}
+
+func (l logger) Info(args ...interface{}) {
+	l.sourced().Info(args...)
+}
+
+func (l logger) Infof(format string, args ...interface{}) {
+	l.sourced().Infof(format, args...)
+}
+
+func (l logger) Warn(args ...interface{}) {
+	l.sourced().Warn(args...)
+}
+
+func (l logger) Warnf(format string, args ...interface{}) {
+	l.sourced().Warnf(format, args...)
+}
+
+func (l logger) Error(args ...interface{}) {
+	l.sourced().Error(args...)
+}
+
+func (l logger) Errorf(format string, args ...interface{}) {
+	l.sourced().Errorf(format, args...)
+}
+
+func (l logger) With(key string, value interface{}) Logger {
+	return logger{l.entry.WithField(key, value)}
+}
+
+func With(key string, value interface{}) Logger {
+	return baseLogger.With(key, value)
+}
+
+func Debug(args ...interface{}) {
+	baseLogger.sourced().Debug(args...)
+}
+
+func Debugf(format string, args ...interface{}) {
+	baseLogger.sourced().Debugf(format, args...)
+}
+
+func Info(args ...interface{}) {
+	baseLogger.sourced().Info(args...)
+}
+
+func Infof(format string, args ...interface{}) {
+	baseLogger.sourced().Infof(format, args...)
+}
+
+func Warn(args ...interface{}) {
+	baseLogger.sourced().Warn(args...)
+}
+
+func Warnf(format string, args ...interface{}) {
+	baseLogger.sourced().Warnf(format, args...)
+}
+
+func Error(args ...interface{}) {
+	baseLogger.sourced().Error(args...)
+}
+
+func Errorf(format string, args ...interface{}) {
+	baseLogger.sourced().Errorf(format, args...)
+}
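
A brief usage sketch pulling the pieces above together; the field names and message are illustrative.

```go
package main

import (
	log "github.com/coreswitch/log"
)

func main() {
	log.SetLevel("debug")
	log.SetOutput("stderr")
	log.SetTextFormatter() // plain text, timestamps disabled

	// With returns a Logger, so fields can be chained before the call;
	// the source and func fields are appended by sourced() unless disabled.
	log.With("component", "adapter").With("port", 16).Infof("activated %d flows", 3)
}
```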
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+	// flagRO indicates whether the value field of a reflect.Value
+	// is read-only.
+	flagRO flag
+
+	// flagAddr indicates whether the address of the reflect.Value's
+	// value may be taken.
+	flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
+	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value
+	// taken from a pointer and not.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
+	}
+	panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
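+
+// Usage note (a sketch based on the build constraints above): this stub is
+// selected with
+//
+//	go build -tags safe
+//
+// in which case spew cannot invoke Stringer/error methods on unaddressable
+// or unexported values and falls back to plain reflection.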
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a value in the range 0-15 to its hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+// mutate the value; however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
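+
+// Illustrative sketch (not part of upstream go-spew): a type that satisfies
+// fmt.Stringer only via a pointer receiver is still detected for a
+// non-addressable value, because handleMethods takes the address after the
+// unsafe bypass.
+//
+//	type id int
+//	func (i *id) String() string { return "id-" + strconv.Itoa(int(*i)) }
+//
+//	var b bytes.Buffer
+//	if handleMethods(&Config, &b, reflect.ValueOf(id(7))) {
+//		_ = b.String() // "id-7"
+//	}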
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 hex digits + 2 bytes for the '0x' prefix.
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
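+
+// For example, printHexPtr(w, 0) writes "<nil>" and printHexPtr(w, 0xcafe)
+// writes "0xcafe".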
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
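+
+// Illustrative sketch (not part of the upstream API): sorting map keys for
+// deterministic output.
+//
+//	m := map[string]int{"b": 2, "a": 1}
+//	keys := reflect.ValueOf(m).MapKeys()
+//	sortValues(keys, &Config)
+//	// keys are now ordered "a", "b"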
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use sets this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Sprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	DisablePointerAddresses: false
+// 	DisableCapacities: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+// 	SpewKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
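+
+// Illustrative usage sketch: a local ConfigState avoids mutating the global
+// spew.Config.
+//
+//	cfg := NewDefaultConfig()
+//	cfg.SortKeys = true
+//	cfg.MaxDepth = 2
+//	fmt.Println(cfg.Sdump(map[string]int{"b": 2, "a": 1}))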
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
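+
+For example, to get deterministic, diffable output in tests you might set:
+
+	spew.Config.SortKeys = true
+	spew.Config.DisablePointerAddresses = true
+	spew.Config.DisableCapacities = true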
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed in the style of the hexdump -C
+command, as shown below.
+
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
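+
+// For example (a sketch; exact type names and addresses vary), dumping a
+// self-referential pointer prints the circular marker instead of recursing
+// forever:
+//
+//	type circular struct{ c *circular }
+//	v := &circular{}
+//	v.c = v
+//	Dump(v)
+//	// (*spew.circular)(0x...)({
+//	//  c: (*spew.circular)(0x...)(<already shown>)
+//	// })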
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled.
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt
+// package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
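+
+// For example, a call such as fmt.Fprintf(w, "%+6.2x", formatter) yields the
+// reconstructed format "%+6.2x", so unsupported verbs keep their original
+// flags, width, and precision when deferred to the standard fmt package.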
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different from empty maps.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were no other types at the time this code was written; if any
+	// get added, fall back to letting the default fmt package handle them.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
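+
+// Example usage (illustrative only, not part of the original library): the
+// returned formatter plugs directly into the standard fmt verbs described
+// above, e.g.
+//
+//	fmt.Printf("%+v\n", NewFormatter(someValue))
+//
+// where someValue is any hypothetical value to be dumped inline.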
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}
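+
+// Example usage (illustrative only): each wrapper behaves like its fmt
+// counterpart, but deep-formats every argument via NewFormatter, e.g.
+//
+//	spew.Printf("cfg = %+v\n", cfg)
+//
+// where cfg is a hypothetical struct value to be printed with pointer
+// addresses included.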
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 0000000..698a3f5
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
new file mode 100644
index 0000000..2d1b3d9
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -0,0 +1,34 @@
+circuit-breaker
+===============
+
+[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+The circuit-breaker resiliency pattern for golang.
+
+Creating a breaker takes three parameters:
+- error threshold (for opening the breaker)
+- success threshold (for closing the breaker)
+- timeout (how long to keep the breaker open)
+
+```go
+b := breaker.New(3, 1, 5*time.Second)
+
+for {
+	result := b.Run(func() error {
+		// communicate with some external service and
+		// return an error if the communication failed
+		return nil
+	})
+
+	switch result {
+	case nil:
+		// success!
+	case breaker.ErrBreakerOpen:
+		// our function wasn't run because the breaker was open
+	default:
+		// some other error
+	}
+}
+```
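+
+A minimal sketch of the asynchronous variant `Go` (see breaker.go below): it
+runs the function in a separate goroutine and reports only whether the breaker
+was open, not the function's result:
+
+```go
+if err := b.Go(func() error {
+	// runs in the background; the breaker still records success/failure
+	return nil
+}); err == breaker.ErrBreakerOpen {
+	// the function was never started because the breaker was open
+}
+```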
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
new file mode 100644
index 0000000..f88ca72
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -0,0 +1,161 @@
+// Package breaker implements the circuit-breaker resiliency pattern for Go.
+package breaker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ErrBreakerOpen is the error returned from Run() when the function is not executed
+// because the breaker is currently open.
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+	closed uint32 = iota
+	open
+	halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+	errorThreshold, successThreshold int
+	timeout                          time.Duration
+
+	lock              sync.Mutex
+	state             uint32
+	errors, successes int
+	lastError         time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker half-opens after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+	return &Breaker{
+		errorThreshold:   errorThreshold,
+		successThreshold: successThreshold,
+		timeout:          timeout,
+	}
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
+func (b *Breaker) Go(work func() error) error {
+	state := atomic.LoadUint32(&b.state)
+
+	if state == open {
+		return ErrBreakerOpen
+	}
+
+	// errcheck complains about ignoring the error return value, but
+	// that's on purpose; if you want an error from a goroutine you have to
+	// get it over a channel or something
+	go b.doWork(state, work)
+
+	return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+	var panicValue interface{}
+
+	result := func() error {
+		defer func() {
+			panicValue = recover()
+		}()
+		return work()
+	}()
+
+	if result == nil && panicValue == nil && state == closed {
+		// short-circuit the normal, success path without contending
+		// on the lock
+		return nil
+	}
+
+	// oh well, I guess we have to contend on the lock
+	b.processResult(result, panicValue)
+
+	if panicValue != nil {
+		// as close as Go lets us come to a "rethrow" although unfortunately
+		// we lose the original panicking location
+		panic(panicValue)
+	}
+
+	return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if result == nil && panicValue == nil {
+		if b.state == halfOpen {
+			b.successes++
+			if b.successes == b.successThreshold {
+				b.closeBreaker()
+			}
+		}
+	} else {
+		if b.errors > 0 {
+			expiry := b.lastError.Add(b.timeout)
+			if time.Now().After(expiry) {
+				b.errors = 0
+			}
+		}
+
+		switch b.state {
+		case closed:
+			b.errors++
+			if b.errors == b.errorThreshold {
+				b.openBreaker()
+			} else {
+				b.lastError = time.Now()
+			}
+		case halfOpen:
+			b.openBreaker()
+		}
+	}
+}
+
+func (b *Breaker) openBreaker() {
+	b.changeState(open)
+	go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+	b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+	time.Sleep(b.timeout)
+
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+	b.errors = 0
+	b.successes = 0
+	atomic.StoreUint32(&b.state, newState)
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
new file mode 100644
index 0000000..d6cf4f1
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+- 1.5.4
+- 1.6.1
+
+sudo: false
diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
new file mode 100644
index 0000000..5bf3688
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md
new file mode 100644
index 0000000..3f2695c
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/README.md
@@ -0,0 +1,13 @@
+# go-xerial-snappy
+
+[![Build Status](https://travis-ci.org/eapache/go-xerial-snappy.svg?branch=master)](https://travis-ci.org/eapache/go-xerial-snappy)
+
+Xerial-compatible Snappy framing support for golang.
+
+Packages using Xerial for snappy encoding use a framing format incompatible with
+basically everything else in existence. This package wraps the
+github.com/golang/snappy package to support it.
+
+Apps that use this format include Apache Kafka (see
+https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
+details).
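+
+A minimal usage sketch (the sample payload is hypothetical):
+
+```go
+data := []byte("hello, kafka")
+
+// Encode produces raw snappy with no framing header.
+encoded := snappy.Encode(data)
+
+// Decode transparently handles both unframed snappy and xerial framing.
+decoded, err := snappy.Decode(encoded)
+if err != nil {
+	// handle malformed input
+}
+_ = decoded
+```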
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
new file mode 100644
index 0000000..6a46f47
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
@@ -0,0 +1,16 @@
+// +build gofuzz
+
+package snappy
+
+func Fuzz(data []byte) int {
+	decode, err := Decode(data)
+	if decode == nil && err == nil {
+		panic("nil error with nil result")
+	}
+
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 0000000..ea8f7af
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,131 @@
+package snappy
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+
+	master "github.com/golang/snappy"
+)
+
+const (
+	sizeOffset = 16
+	sizeBytes  = 4
+)
+
+var (
+	xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+	// This is xerial framing version 1, with minimum compatible version 1
+	xerialVersionInfo = []byte{0, 0, 0, 1, 0, 0, 0, 1}
+
+	// ErrMalformed is returned by the decoder when the xerial framing
+	// is malformed
+	ErrMalformed = errors.New("malformed xerial framing")
+)
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+	return master.Encode(nil, src)
+}
+
+// EncodeStream *appends* to the specified 'dst' the compressed
+// 'src' in xerial framing format. If 'dst' does not have enough
+// capacity, then a new slice will be allocated. If 'dst' has
+// non-zero length, then it *must* have been built using this function.
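+//
+// The resulting stream layout (derived from the code below) is:
+//
+//	[8-byte xerial header][8-byte version info]
+//	then repeated: [4-byte big-endian compressed length][snappy block]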
+func EncodeStream(dst, src []byte) []byte {
+	if len(dst) == 0 {
+		dst = append(dst, xerialHeader...)
+		dst = append(dst, xerialVersionInfo...)
+	}
+
+	// Snappy encode in blocks of maximum 32KB
+	var (
+		max       = len(src)
+		blockSize = 32 * 1024
+		pos       = 0
+		chunk     []byte
+	)
+
+	for pos < max {
+		newPos := min(pos + blockSize, max)
+		chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
+
+		// First encode the compressed size (big-endian)
+		// Put* panics if the buffer is too small, so pad 4 bytes first
+		origLen := len(dst)
+		dst = append(dst, dst[0:4]...)
+		binary.BigEndian.PutUint32(dst[origLen:], uint32(len(chunk)))
+
+		// And now the compressed data
+		dst = append(dst, chunk...)
+		pos = newPos
+	}
+	return dst
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+	return DecodeInto(nil, src)
+}
+
+// DecodeInto decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format into the specified `dst`.
+// It is assumed that the entirety of `dst` including all capacity is available
+// for use by this function. If `dst` is nil *or* insufficiently large to hold
+// the decoded `src`, new space will be allocated.
+func DecodeInto(dst, src []byte) ([]byte, error) {
+	var max = len(src)
+	if max < len(xerialHeader) {
+		return nil, ErrMalformed
+	}
+
+	if !bytes.Equal(src[:8], xerialHeader) {
+		return master.Decode(dst[:cap(dst)], src)
+	}
+
+	if max < sizeOffset+sizeBytes {
+		return nil, ErrMalformed
+	}
+
+	if dst == nil {
+		dst = make([]byte, 0, len(src))
+	}
+
+	dst = dst[:0]
+	var (
+		pos   = sizeOffset
+		chunk []byte
+		err   error
+	)
+
+	for pos+sizeBytes <= max {
+		size := int(binary.BigEndian.Uint32(src[pos : pos+sizeBytes]))
+		pos += sizeBytes
+
+		nextPos := pos + size
+		// On architectures where int is 32 bits wide, size + pos could
+		// overflow, so we need to check the low bound as well as the
+		// high.
+		if nextPos < pos || nextPos > max {
+			return nil, ErrMalformed
+		}
+
+		chunk, err = master.Decode(chunk[:cap(chunk)], src[pos:nextPos])
+
+		if err != nil {
+			return nil, err
+		}
+		pos = nextPos
+		dst = append(dst, chunk...)
+	}
+	return dst, nil
+}
diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore
new file mode 100644
index 0000000..8365624
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml
new file mode 100644
index 0000000..235a40a
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+
+go:
+  - 1.2
+  - 1.3
+  - 1.4
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
new file mode 100644
index 0000000..d5f36db
--- /dev/null
+++ b/vendor/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md
new file mode 100644
index 0000000..8e78233
--- /dev/null
+++ b/vendor/github.com/eapache/queue/README.md
@@ -0,0 +1,16 @@
+Queue
+=====
+
+[![Build Status](https://travis-ci.org/eapache/queue.svg)](https://travis-ci.org/eapache/queue)
+[![GoDoc](https://godoc.org/github.com/eapache/queue?status.png)](https://godoc.org/github.com/eapache/queue)
+[![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
+
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
+
+Follows semantic versioning using https://gopkg.in/ - import from
+[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
+for guaranteed API stability.
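+
+A minimal usage sketch:
+
+```go
+q := queue.New()
+q.Add("a")
+q.Add("b")
+
+fmt.Println(q.Length()) // 2
+fmt.Println(q.Peek())   // "a" (does not remove)
+fmt.Println(q.Get(-1))  // "b" (negative indices count from the back)
+fmt.Println(q.Remove()) // "a"
+```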
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 0000000..71d1acd
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is the smallest capacity that the queue may have.
+// Must be a power of 2 for bitwise modulus: x % n == x & (n - 1).
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+	buf               []interface{}
+	head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+	return &Queue{
+		buf: make([]interface{}, minQueueLen),
+	}
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+	return q.count
+}
+
+// resize resizes the queue to fit exactly twice its current contents.
+// This can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+	newBuf := make([]interface{}, q.count<<1)
+
+	if q.tail > q.head {
+		copy(newBuf, q.buf[q.head:q.tail])
+	} else {
+		n := copy(newBuf, q.buf[q.head:])
+		copy(newBuf[n:], q.buf[:q.tail])
+	}
+
+	q.head = 0
+	q.tail = q.count
+	q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+	if q.count == len(q.buf) {
+		q.resize()
+	}
+
+	q.buf[q.tail] = elem
+	// bitwise modulus
+	q.tail = (q.tail + 1) & (len(q.buf) - 1)
+	q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+	if q.count <= 0 {
+		panic("queue: Peek() called on empty queue")
+	}
+	return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
+func (q *Queue) Get(i int) interface{} {
+	// If indexing backwards, convert to positive index.
+	if i < 0 {
+		i += q.count
+	}
+	if i < 0 || i >= q.count {
+		panic("queue: Get() called with index out of range")
+	}
+	// bitwise modulus
+	return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+	if q.count <= 0 {
+		panic("queue: Remove() called on empty queue")
+	}
+	ret := q.buf[q.head]
+	q.buf[q.head] = nil
+	// bitwise modulus
+	q.head = (q.head + 1) & (len(q.buf) - 1)
+	q.count--
+	// Resize down if the buffer is 1/4 full.
+	if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
+		q.resize()
+	}
+	return ret
+}
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 0000000..3d97fc7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which
+# lists people.  For example, employees are listed in CONTRIBUTORS,
+# but not in AUTHORS, because the employer holds the copyright.
+
+# Names should be added to this file as one of
+#     Organization's name
+#     Individual's name <submission email address>
+#     Individual's name <submission email address> <email2> <emailN>
+
+# Please keep the list sorted.
+
+Sendgrid, Inc
+Vastech SA (PTY) LTD
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1b4f6c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -0,0 +1,23 @@
+Anton Povarov <anton.povarov@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Clayton Coleman <ccoleman@redhat.com>
+Denis Smirnov <denis.smirnov.91@gmail.com>
+DongYun Kang <ceram1000@gmail.com>
+Dwayne Schultz <dschultz@pivotal.io>
+Georg Apitz <gapitz@pivotal.io>
+Gustav Paul <gustav.paul@gmail.com>
+Johan Brandhorst <johan.brandhorst@gmail.com>
+John Shahid <jvshahid@gmail.com>
+John Tuley <john@tuley.org>
+Laurent <laurent@adyoulike.com>
+Patrick Lee <patrick@dropbox.com>
+Peter Edge <peter.edge@gmail.com>
+Roger Johansson <rogeralsing@gmail.com>
+Sam Nguyen <sam.nguyen@sendgrid.com>
+Sergio Arbeo <serabe@gmail.com>
+Stephen J Day <stephen.day@docker.com>
+Tamir Duberstein <tamird@gmail.com>
+Todd Eisenberger <teisenberger@dropbox.com>
+Tormod Erevik Lea <tormodlea@gmail.com>
+Vyacheslav Kim <kane@sendgrid.com>
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS
new file mode 100644
index 0000000..b368efb
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS
@@ -0,0 +1,5 @@
+The contributors to the Go protobuf repository:
+
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
\ No newline at end of file
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..f57de90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+
+Protocol Buffers for Go with Gadgets
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors.  All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile
new file mode 100644
index 0000000..0b4659b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile
@@ -0,0 +1,37 @@
+# Protocol Buffers for Go with Gadgets
+#
+# Copyright (c) 2013, The GoGo Authors. All rights reserved.
+# http://github.com/gogo/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
+	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto
+
+restore:
+	cp gogo.pb.golden gogo.pb.go
+
+preserve:
+	cp gogo.pb.go gogo.pb.golden
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
new file mode 100644
index 0000000..081c86f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
@@ -0,0 +1,169 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package gogoproto provides extensions for protocol buffers to achieve:
+
+  - fast marshalling and unmarshalling.
+  - peace of mind by optionally generating test and benchmark code.
+  - more canonical Go structures.
+  - less typing by optionally generating extra helper code.
+  - goprotobuf compatibility
+
+More Canonical Go Structures
+
+A lot of time spent working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with, and then write a function to copy the values between the two structs.
+You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
+Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
+
+  - nullable, if false, a field is generated without a pointer (see warning below).
+  - embed, if true, the field is generated as an embedded field.
+  - customtype, it works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128
+  - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
+  - casttype (beta), Changes the generated fieldtype.  All generated code assumes that this type is castable to the protocol buffer field type.  It does not work for structs or enums.
+  - castkey (beta), Changes the generated fieldtype for a map key.  All generated code assumes that this type is castable to the protocol buffer field type.  Only supported on maps.
+  - castvalue (beta), Changes the generated fieldtype for a map value.  All generated code assumes that this type is castable to the protocol buffer field type.  Only supported on maps.
+
+Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+	github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+  package test;
+
+  import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	message A {
+		optional string Description = 1 [(gogoproto.nullable) = false];
+		optional int64 Number = 2 [(gogoproto.nullable) = false];
+		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+	}
+
+Will generate a go struct which looks a lot like this:
+
+	type A struct {
+		Description string
+		Number      int64
+		Id          github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+	message B {
+		optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+		repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+	}
+
+See below that A is embedded in B.
+
+	type B struct {
+		A
+		G []github_com_gogo_protobuf_test_custom.Uint128
+	}
+
+Also see the repeated custom type.
+
+	type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+	message C {
+		optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+	}
+
+See below that the field's name is MySize and not Size.
+
+	type C struct {
+		MySize		*int64
+	}
+
+This is useful when a protocol buffer message has a field name which conflicts with a generated method.
+As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
+
+Gogoprotobuf also has some more subtle changes; these could be changed back:
+
+  - the generated package names for imports do not have the extra /filename.pb,
+  but are actually the imports specified in the .proto file.
+
+Gogoprotobuf has also lost some features which should be brought back with time:
+
+  - Marshalling and unmarshalling with reflect and without the unsafe package;
+  this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states, somewhere, that you should be able to tell whether a
+field is set or unset.  With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set.  It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
+  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer, or write your own string method.
+  - goproto_getters, if false, the message is generated without get methods; this is useful when you would rather use face
+  - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer, or write your own string method.
+  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
+  - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
+
+Less Typing and Peace of Mind is explained in their specific plugin folders godoc:
+
+	- github.com/gogo/protobuf/plugin/<extension_name>
+
+If you do not use any of these extensions, the code that is generated
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+	github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
new file mode 100644
index 0000000..f6502e4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
@@ -0,0 +1,45 @@
+// Code generated by protoc-gen-go.
+// source: gogo.proto
+// DO NOT EDIT!
+
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import json "encoding/json"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference proto, json, and math imports to suppress error if they are not otherwise used.
+var _ = proto.Marshal
+var _ = &json.SyntaxError{}
+var _ = math.Inf
+
+var E_Nullable = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         51235,
+	Name:          "gogoproto.nullable",
+	Tag:           "varint,51235,opt,name=nullable",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         51236,
+	Name:          "gogoproto.embed",
+	Tag:           "varint,51236,opt,name=embed",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         51237,
+	Name:          "gogoproto.customtype",
+	Tag:           "bytes,51237,opt,name=customtype",
+}
+
+func init() {
+	proto.RegisterExtension(E_Nullable)
+	proto.RegisterExtension(E_Embed)
+	proto.RegisterExtension(E_Customtype)
+}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
new file mode 100644
index 0000000..b80c856
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
@@ -0,0 +1,144 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package gogoproto;
+
+import "google/protobuf/descriptor.proto";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "GoGoProtos";
+option go_package = "github.com/gogo/protobuf/gogoproto";
+
+extend google.protobuf.EnumOptions {
+	optional bool goproto_enum_prefix = 62001;
+	optional bool goproto_enum_stringer = 62021;
+	optional bool enum_stringer = 62022;
+	optional string enum_customname = 62023;
+	optional bool enumdecl = 62024;
+}
+
+extend google.protobuf.EnumValueOptions {
+	optional string enumvalue_customname = 66001;
+}
+
+extend google.protobuf.FileOptions {
+	optional bool goproto_getters_all = 63001;
+	optional bool goproto_enum_prefix_all = 63002;
+	optional bool goproto_stringer_all = 63003;
+	optional bool verbose_equal_all = 63004;
+	optional bool face_all = 63005;
+	optional bool gostring_all = 63006;
+	optional bool populate_all = 63007;
+	optional bool stringer_all = 63008;
+	optional bool onlyone_all = 63009;
+
+	optional bool equal_all = 63013;
+	optional bool description_all = 63014;
+	optional bool testgen_all = 63015;
+	optional bool benchgen_all = 63016;
+	optional bool marshaler_all = 63017;
+	optional bool unmarshaler_all = 63018;
+	optional bool stable_marshaler_all = 63019;
+
+	optional bool sizer_all = 63020;
+
+	optional bool goproto_enum_stringer_all = 63021;
+	optional bool enum_stringer_all = 63022;
+
+	optional bool unsafe_marshaler_all = 63023;
+	optional bool unsafe_unmarshaler_all = 63024;
+
+	optional bool goproto_extensions_map_all = 63025;
+	optional bool goproto_unrecognized_all = 63026;
+	optional bool gogoproto_import = 63027;
+	optional bool protosizer_all = 63028;
+	optional bool compare_all = 63029;
+    optional bool typedecl_all = 63030;
+    optional bool enumdecl_all = 63031;
+
+	optional bool goproto_registration = 63032;
+	optional bool messagename_all = 63033;
+
+	optional bool goproto_sizecache_all = 63034;
+	optional bool goproto_unkeyed_all = 63035;
+}
+
+extend google.protobuf.MessageOptions {
+	optional bool goproto_getters = 64001;
+	optional bool goproto_stringer = 64003;
+	optional bool verbose_equal = 64004;
+	optional bool face = 64005;
+	optional bool gostring = 64006;
+	optional bool populate = 64007;
+	optional bool stringer = 67008;
+	optional bool onlyone = 64009;
+
+	optional bool equal = 64013;
+	optional bool description = 64014;
+	optional bool testgen = 64015;
+	optional bool benchgen = 64016;
+	optional bool marshaler = 64017;
+	optional bool unmarshaler = 64018;
+	optional bool stable_marshaler = 64019;
+
+	optional bool sizer = 64020;
+
+	optional bool unsafe_marshaler = 64023;
+	optional bool unsafe_unmarshaler = 64024;
+
+	optional bool goproto_extensions_map = 64025;
+	optional bool goproto_unrecognized = 64026;
+
+	optional bool protosizer = 64028;
+	optional bool compare = 64029;
+
+	optional bool typedecl = 64030;
+
+	optional bool messagename = 64033;
+
+	optional bool goproto_sizecache = 64034;
+	optional bool goproto_unkeyed = 64035;
+}
+
+extend google.protobuf.FieldOptions {
+	optional bool nullable = 65001;
+	optional bool embed = 65002;
+	optional string customtype = 65003;
+	optional string customname = 65004;
+	optional string jsontag = 65005;
+	optional string moretags = 65006;
+	optional string casttype = 65007;
+	optional string castkey = 65008;
+	optional string castvalue = 65009;
+
+	optional bool stdtime = 65010;
+	optional bool stdduration = 65011;
+	optional bool wktpointer = 65012;
+
+}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
new file mode 100644
index 0000000..390d4e4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
@@ -0,0 +1,415 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gogoproto
+
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+import proto "github.com/gogo/protobuf/proto"
+
+func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Embed, false)
+}
+
+func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Nullable, true)
+}
+
+func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Stdtime, false)
+}
+
+func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
+}
+
+func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
+}
+
+func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
+}
+
+func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
+}
+
+func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
+}
+
+func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
+}
+
+func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
+}
+
+func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
+}
+
+func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
+}
+
+func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
+}
+
+func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
+	return (IsStdTime(field) || IsStdDuration(field) ||
+		IsStdDouble(field) || IsStdFloat(field) ||
+		IsStdInt64(field) || IsStdUInt64(field) ||
+		IsStdInt32(field) || IsStdUInt32(field) ||
+		IsStdBool(field) ||
+		IsStdString(field) || IsStdBytes(field))
+}
+
+func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
+}
+
+func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
+	nullable := IsNullable(field)
+	if field.IsMessage() || IsCustomType(field) {
+		return nullable
+	}
+	if proto3 {
+		return false
+	}
+	return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
+}
+
+func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCustomType(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCastType(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCastKey(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCastValue(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
+}
+
+func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
+}
+
+func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Customtype)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Casttype)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Castkey)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Castvalue)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
+	name := GetCustomName(field)
+	if len(name) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
+	name := GetEnumCustomName(field)
+	if len(name) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
+	name := GetEnumValueCustomName(field)
+	if len(name) > 0 {
+		return true
+	}
+	return false
+}
+
+func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Customname)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_EnumCustomname)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
+	if field == nil {
+		return nil
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Jsontag)
+		if err == nil && v.(*string) != nil {
+			return (v.(*string))
+		}
+	}
+	return nil
+}
+
+func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
+	if field == nil {
+		return nil
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Moretags)
+		if err == nil && v.(*string) != nil {
+			return (v.(*string))
+		}
+	}
+	return nil
+}
+
+type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
+
+func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
+}
+
+func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
+}
+
+func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
+}
+
+func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
+}
+
+func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
+}
+
+func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
+}
+
+func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
+}
+
+func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
+}
+
+func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
+}
+
+func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
+}
+
+func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
+}
+
+func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
+}
+
+func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
+}
+
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
+}
+
+func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
+}
+
+func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
+}
+
+func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
+}
+
+func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
+}
+
+func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
+}
+
+func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
+}
+
+func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
+}
+
+func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
+}
+
+func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
+}
+
+func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
+}
+
+func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
+	return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
+}
+
+func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
+}
+
+func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
+	return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
+}
+
+func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
+}
+
+func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
+}
+
+func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
+}
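Every helper above follows one precedence rule: a message- or enum-level option, if set, overrides the file-level `*All` default. A minimal, runnable sketch of that rule, assuming the vendored `gogoproto`, `proto`, and `descriptor` packages import as below:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// File-level default: marshaler_all = true.
	file := &descriptor.FileDescriptorProto{Options: &descriptor.FileOptions{}}
	if err := proto.SetExtension(file.Options, gogoproto.E_MarshalerAll, proto.Bool(true)); err != nil {
		panic(err)
	}

	// A message with no message-level option falls back to the file default.
	msg := &descriptor.DescriptorProto{}
	fmt.Println(gogoproto.IsMarshaler(file, msg)) // true
}
```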
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000..00d65f3
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+	go install
+
+test: install generate-test-pbs
+	go test
+
+
+generate-test-pbs:
+	make install
+	make -C test_proto
+	make -C proto3_proto
+	make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..a26b046
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,258 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method would conflict with
+// many existing protos that already have a Merge data field defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
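For illustration, a minimal sketch of Clone and Merge using a hypothetical hand-rolled legacy message (generated types are the normal case; the struct and its tags below are assumptions, modeled on the internal duration type added later in this change):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// note is a hand-rolled message for illustration only.
type note struct {
	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
}

func (m *note) Reset()         { *m = note{} }
func (m *note) String() string { return m.Text }
func (*note) ProtoMessage()    {}

func main() {
	a := &note{Text: "hello"}
	b := proto.Clone(a).(*note) // deep copy: Merge into a fresh value
	b.Text = "changed"
	fmt.Println(a.Text, b.Text) // hello changed

	proto.Merge(a, b)     // fields set in b are merged into a
	fmt.Println(a.Text)   // changed
}
```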
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+		emOut := out.Addr().Interface().(extensionsBytes)
+		bIn := emIn.GetExtensions()
+		bOut := emOut.GetExtensions()
+		*bOut = append(*bOut, *bIn...)
+	} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
new file mode 100644
index 0000000..2455248
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
@@ -0,0 +1,39 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "reflect"
+
+type custom interface {
+	Marshal() ([]byte, error)
+	Unmarshal(data []byte) error
+	Size() int
+}
+
+var customType = reflect.TypeOf((*custom)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000..d9aa3c4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,428 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
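For example, 300 is 0b1_0010_1100; it splits into the seven-bit groups 0101100 and 0000010, emitted low group first with the high bit of each byte as a continuation flag:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	x, n := proto.DecodeVarint([]byte{0xAC, 0x02}) // 0xAC = 1_0101100, 0x02 = 0_0000010
	fmt.Println(x, n)                              // 300 2

	x, n = proto.DecodeVarint([]byte{0xAC}) // truncated: continuation bit set, no next byte
	fmt.Println(x, n)                       // 0 0
}
```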
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
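Fixed-width values are little-endian on the wire, as the shifts above show; a quick check through the Buffer:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	p := proto.NewBuffer([]byte{0x01, 0x02, 0, 0, 0, 0, 0, 0})
	x, err := p.DecodeFixed64()
	fmt.Println(x, err) // 513 <nil> (0x0201: least significant byte first)
}
```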
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
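Zigzag encoding maps signed integers to unsigned ones so that small negative values stay small on the wire (0→0, -1→1, 1→2, -2→3, …). A round trip through the Buffer, assuming the usual `NewBuffer`/`Bytes` helpers from this package:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	enc := proto.NewBuffer(nil)
	_ = enc.EncodeZigzag64(uint64(int64(-3))) // zigzag(-3) == 5, written as a varint

	dec := proto.NewBuffer(enc.Bytes())
	v, _ := dec.DecodeZigzag64()
	fmt.Println(int64(v)) // -3
}
```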
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+	// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
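The practical difference is easiest to see with a repeated field, sketched here with another hypothetical hand-rolled message: Unmarshal wipes existing data first, UnmarshalMerge appends to it.

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// list is a hand-rolled message for illustration only.
type list struct {
	Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"`
}

func (m *list) Reset()         { *m = list{} }
func (m *list) String() string { return fmt.Sprint(m.Items) }
func (*list) ProtoMessage()    {}

func main() {
	data, _ := proto.Marshal(&list{Items: []string{"b"}})

	m := &list{Items: []string{"a"}}
	_ = proto.Unmarshal(data, m) // resets first
	fmt.Println(m.Items)         // [b]

	m = &list{Items: []string{"a"}}
	_ = proto.UnmarshalMerge(data, m) // merges into existing data
	fmt.Println(m.Items)              // [a b]
}
```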
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go
new file mode 100644
index 0000000..fe1bd7d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
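A small sketch of the effect, using a hypothetical hand-rolled message that carries an XXX_unrecognized buffer (the bytes below are an arbitrary unknown field, tag 2, varint 1):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// blob is a hand-rolled message for illustration only.
type blob struct {
	Name             string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *blob) Reset()         { *m = blob{} }
func (m *blob) String() string { return m.Name }
func (*blob) ProtoMessage()    {}

func main() {
	m := &blob{Name: "x", XXX_unrecognized: []byte{0x10, 0x01}}
	proto.DiscardUnknown(m)
	fmt.Println(m.XXX_unrecognized == nil) // true: unknown bytes dropped
}
```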
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							discardInfo.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						discardInfo.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go
new file mode 100644
index 0000000..93464c9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration.go
@@ -0,0 +1,100 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Range of a Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, while the range of time.Duration is only about 290 years).
+func validateDuration(d *duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// durationFromProto converts a Duration to a time.Duration. It
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// durationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
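These conversions are internal (note the lowercase names), but the split they perform is simple to show directly: the duration's nanosecond count divides into whole seconds plus remaining nanos, and truncating division keeps both parts the same sign, as duration.proto requires.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	d := -90*time.Second - 500*time.Millisecond
	nanos := d.Nanoseconds()
	secs := nanos / 1e9 // -90 (Go integer division truncates toward zero)
	nanos -= secs * 1e9 // -500000000: same sign as secs
	fmt.Println(secs, nanos)
}
```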
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 0000000..e748e17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
+
+type duration struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *duration) Reset()       { *m = duration{} }
+func (*duration) ProtoMessage()  {}
+func (*duration) String() string { return "duration<string>" }
+
+func init() {
+	RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
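+
+// Illustrative sketch (editorial note, not part of the upstream sources):
+// 300 is 0b10_0101100, so its varint form is two bytes, least-significant
+// 7-bit group first, with the continuation bit set on all but the last:
+//
+//	b := EncodeVarint(300) // b == []byte{0xAC, 0x02}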
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
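+
+// For example (editorial note): SizeVarint(127) == 1 and SizeVarint(128) == 2,
+// matching the byte counts EncodeVarint produces for the same values.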
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
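+
+// Editorial illustration: zigzag folds the sign into the low bit so small
+// negative values stay small on the wire (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3).
+// Given a Buffer p:
+//
+//	v := int64(-1)
+//	p.EncodeZigzag64(uint64(v)) // appends the single byte 0x01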
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
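+
+// Editorial sketch: the payload is framed by its varint length, the same
+// framing EncodeMessage uses for embedded messages. Given a Buffer p:
+//
+//	p.EncodeRawBytes([]byte("hi")) // appends 0x02 'h' 'i'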
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000..0f5fb17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,33 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+	return &RequiredNotSetError{field}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
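+
+// Editorial usage sketch, assuming a generated message type pb.Test like the
+// one in the package documentation (not defined here):
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	equal := proto.Equal(a, b) // true: same type, equal set fields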
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1, m2 := e1.value, e2.value
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// If both have only encoded form and the bytes are the same,
+			// it is handled above. We get here when the bytes are different.
+			// We don't know how to decode it, so just compare them as byte
+			// slices.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000..44ebd45
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,604 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	case extensionsBytes:
+		return slowExtensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc  *ExtensionDesc
+	value interface{}
+	enc   []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	if ebase, ok := base.(extensionsBytes); ok {
+		clearExtension(base, id)
+		ext := ebase.GetExtensions()
+		*ext = append(*ext, b...)
+		return
+	}
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if ea, ok := pbi.(slowExtensionAdapter); ok {
+		pbi = ea.extensionsBytes
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		buf := *ext
+		o := 0
+		for o < len(buf) {
+			tag, n := DecodeVarint(buf[o:])
+			fieldNum := int32(tag >> 3)
+			if fieldNum == extension.Field {
+				return true
+			}
+			wireType := int(tag & 0x7)
+			o += n
+			l, err := size(buf[o:], wireType)
+			if err != nil {
+				return false
+			}
+			o += l
+		}
+		return false
+	}
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	clearExtension(pb, extension.Field)
+}
+
+func clearExtension(pb Message, fieldNum int32) {
+	if epb, ok := pb.(extensionsBytes); ok {
+		offset := 0
+		for offset != -1 {
+			offset = deleteExtension(epb, fieldNum, offset)
+		}
+		return
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, fieldNum)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		return decodeExtensionFromBytes(extension, *ext)
+	}
+
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if cerr := checkExtensionTypes(epb, extension); cerr != nil {
+			return nil, cerr
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
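+
+// Editorial usage sketch; E_Foo stands in for a hypothetical generated
+// *ExtensionDesc and is not defined in this package:
+//
+//	if proto.HasExtension(msg, E_Foo) {
+//		v, err := proto.GetExtension(msg, E_Foo)
+//		// on success, v has the Go type given by E_Foo.ExtensionType
+//	}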
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non int32 reflect.value directly
+		// set it as a int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	if epb, ok := pb.(extensionsBytes); ok {
+		newb, err := encodeExtension(extension, value)
+		if err != nil {
+			return err
+		}
+		bb := epb.GetExtensions()
+		*bb = append(*bb, newb...)
+		return nil
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return errors.New("proto: bad extension value type")
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
+}
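+
+// Editorial counterpart to the GetExtension sketch above (E_Foo is still a
+// hypothetical generated descriptor):
+//
+//	if err := proto.SetExtension(msg, E_Foo, proto.String("bar")); err != nil {
+//		// the value's type must exactly match E_Foo.ExtensionType
+//	}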
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		*ext = []byte{}
+		return
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000..53ebd8c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,368 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+)
+
+type extensionsBytes interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	GetExtensions() *[]byte
+}
+
+type slowExtensionAdapter struct {
+	extensionsBytes
+}
+
+func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
+	panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
+}
+
+func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	b := s.GetExtensions()
+	m, err := BytesToExtensionsMap(*b)
+	if err != nil {
+		panic(err)
+	}
+	return m, notLocker{}
+}
+
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+	if reflect.ValueOf(pb).IsNil() {
+		return ifnotset
+	}
+	value, err := GetExtension(pb, extension)
+	if err != nil {
+		return ifnotset
+	}
+	if value == nil {
+		return ifnotset
+	}
+	if value.(*bool) == nil {
+		return ifnotset
+	}
+	return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+	if err := this.Encode(); err != nil {
+		return false
+	}
+	if err := that.Encode(); err != nil {
+		return false
+	}
+	return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+	if err := this.Encode(); err != nil {
+		return 1
+	}
+	if err := that.Encode(); err != nil {
+		return -1
+	}
+	return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfInternalExtension(m extendableProto) (n int) {
+	info := getMarshalInfo(reflect.TypeOf(m))
+	return info.sizeV1Extensions(m.extensionsWrite())
+}
+
+type sortableMapElem struct {
+	field int32
+	ext   Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+	s := make(sortableExtensions, 0, len(m))
+	for k, v := range m {
+		s = append(s, &sortableMapElem{field: k, ext: v})
+	}
+	return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+	sort.Sort(this)
+	ss := make([]string, len(this))
+	for i := range this {
+		ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+	}
+	return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromInternalExtension(m extendableProto) string {
+	return StringFromExtensionsMap(m.extensionsWrite())
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+	return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+	m, err := BytesToExtensionsMap(ext)
+	if err != nil {
+		panic(err)
+	}
+	return StringFromExtensionsMap(m)
+}
+
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[o:], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		o += n
+	}
+	return o, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+	e := m[id]
+	if err := e.Encode(); err != nil {
+		return nil, err
+	}
+	return e.enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+	switch wire {
+	case WireVarint:
+		_, n := DecodeVarint(buf)
+		return n, nil
+	case WireFixed64:
+		return 8, nil
+	case WireBytes:
+		v, n := DecodeVarint(buf)
+		return int(v) + n, nil
+	case WireFixed32:
+		return 4, nil
+	case WireStartGroup:
+		offset := 0
+		for {
+			u, n := DecodeVarint(buf[offset:])
+			fwire := int(u & 0x7)
+			offset += n
+			if fwire == WireEndGroup {
+				return offset, nil
+			}
+			// Size the nested field by its own wire type (fwire),
+			// not the enclosing group's wire type.
+			s, err := size(buf[offset:], fwire)
+			if err != nil {
+				return 0, err
+			}
+			offset += s
+		}
+	}
+	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
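+
+// Editorial example: a length-delimited field's size is its one-byte length
+// prefix plus the payload,
+//
+//	n, _ := size([]byte{0x02, 'h', 'i'}, WireBytes) // n == 3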
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+	m := make(map[int32]Extension)
+	i := 0
+	for i < len(buf) {
+		tag, n := DecodeVarint(buf[i:])
+		if n <= 0 {
+			return nil, fmt.Errorf("unable to decode varint")
+		}
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		l, err := size(buf[i+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		end := i + l + n
+		m[fieldNum] = Extension{enc: buf[i:end]}
+		i = end
+	}
+	return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+	ee := Extension{enc: make([]byte, len(e))}
+	copy(ee.enc, e)
+	return ee
+}
+
+func AppendExtension(e Message, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsBytes); eok {
+		ext := ee.GetExtensions()
+		*ext = append(*ext, buf...)
+		return
+	}
+	if ee, eok := e.(extendableProto); eok {
+		m := ee.extensionsWrite()
+		ext := m[tag] // may be missing
+		ext.enc = append(ext.enc, buf...)
+		m[tag] = ext
+	}
+}
+
+func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
+	u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
+	ei := u.getExtElemInfo(extension)
+	v := value
+	p := toAddrPointer(&v, ei.isptr)
+	siz := ei.sizer(p, SizeVarint(ei.wiretag))
+	buf := make([]byte, 0, siz)
+	return ei.marshaler(buf, p, ei.wiretag, false)
+}
+
+func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
+	o := 0
+	for o < len(buf) {
+		tag, n := DecodeVarint(buf[o:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		if o+n > len(buf) {
+			return nil, fmt.Errorf("unable to decode extension")
+		}
+		l, err := size(buf[o+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		if fieldNum == extension.Field {
+			if o+n+l > len(buf) {
+				return nil, fmt.Errorf("unable to decode extension")
+			}
+			v, err := decodeExtension(buf[o:o+n+l], extension)
+			if err != nil {
+				return nil, err
+			}
+			return v, nil
+		}
+		o += n + l
+	}
+	return defaultExtensionValue(extension)
+}
+
+func (this *Extension) Encode() error {
+	if this.enc == nil {
+		var err error
+		this.enc, err = encodeExtension(this.desc, this.value)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (this Extension) GoString() string {
+	if err := this.Encode(); err != nil {
+		return fmt.Sprintf("error encoding extension: %v", err)
+	}
+	return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return SetExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+	}
+	return GetExtension(pb, desc)
+}
+
+func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
+	x := &XXX_InternalExtensions{
+		p: new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		}),
+	}
+	x.p.extensionMap = m
+	return *x
+}
+
+func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
+	pb := extendable.(extendableProto)
+	return pb.extensionsWrite()
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+	ext := pb.GetExtensions()
+	for offset < len(*ext) {
+		tag, n1 := DecodeVarint((*ext)[offset:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		n2, err := size((*ext)[offset+n1:], wireType)
+		if err != nil {
+			panic(err)
+		}
+		newOffset := offset + n1 + n2
+		if fieldNum == theFieldNum {
+			*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+			return offset
+		}
+		offset = newOffset
+	}
+	return -1
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000..b2271d0
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,987 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from snake_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/gogo/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/gogo/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return fmt.Sprintf("proto: required field not set")
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the merge was successful,
+// that is, whether err was nil or non-fatal. It returns false for any fatal error.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders.  Useful for tuning the library itself.
+type Stats struct {
+	Emalloc uint64 // mallocs in encode
+	Dmalloc uint64 // mallocs in decode
+	Encode  uint64 // number of encodes
+	Decode  uint64 // number of decodes
+	Chit    uint64 // number of cache hits
+	Cmiss   uint64 // number of cache misses
+	Size    uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
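+
+// Example (sketch): reusing a single Buffer across marshals to reduce
+// allocations; items and send are hypothetical, and Buffer.Marshal is
+// assumed from this package's encoder.
+//
+//	var b Buffer
+//	for _, m := range items {
+//		b.Reset()
+//		if err := b.Marshal(m); err != nil {
+//			log.Fatal("marshaling error: ", err)
+//		}
+//		send(b.Bytes())
+//	}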
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
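+
+// Example (sketch): requesting deterministic output so that equal
+// messages marshal to identical bytes within one binary; msg is
+// hypothetical, and Buffer.Marshal is assumed from this package's
+// encoder.
+//
+//	var b Buffer
+//	b.SetDeterministic(true)
+//	if err := b.Marshal(msg); err != nil {
+//		log.Fatal("marshaling error: ", err)
+//	}
+//	data := b.Bytes()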
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns the enum's name if
+// known, or its decimal value otherwise.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
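+
+// Example (sketch): the enum helpers above, assuming hypothetical
+// generated maps Foo_name (int32 to string) and Foo_value (string to
+// int32) for an enum Foo with BAR = 2.
+//
+//	s := EnumName(Foo_name, 2)                                     // "BAR"
+//	v, err := UnmarshalJSONEnum(Foo_value, []byte(`"BAR"`), "Foo") // v == 2
+//	v, err = UnmarshalJSONEnum(Foo_value, []byte("2"), "Foo")      // v == 2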
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	sindex := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = sindex
+}
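+
+// Example (sketch): inspecting previously encoded bytes tag by tag.
+//
+//	var b Buffer
+//	b.DebugPrint("after marshal", data) // data from a prior Marshal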
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
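+
+// Example (sketch): populating declared proto2 defaults on a freshly
+// constructed message; pb.Config is hypothetical.
+//
+//	c := &pb.Config{}
+//	SetDefaults(c)
+//	// Unset scalar fields with declared defaults are now non-nil.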
+
+// v is a struct or a pointer to one.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or T or []*T or []T
+		switch f.Kind() {
+		case reflect.Struct:
+			setDefaults(f, recur, zeros)
+
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.Kind() == reflect.Ptr && e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its default-value
+	// information: the scalar fields with proto-declared defaults and the
+	// struct indices of its nested messages.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Struct:
+		nestedMessage = true // non-nullable
+
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr, reflect.Struct:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
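+
+// Example (sketch, in-package): sorting map keys in place for a
+// deterministic iteration order; v is a reflect.Value of map kind.
+//
+//	keys := v.MapKeys()
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		// visit v.MapIndex(k) in key order
+//	}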
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the
+// proto package.
+const GoGoProtoPackageIsVersion2 = true
+
+// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the
+// proto package.
+const GoGoProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..b3aa391
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,50 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+type Sizer interface {
+	Size() int
+}
+
+type ProtoSizer interface {
+	ProtoSize() int
+}
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+	s, ok := m[value]
+	if !ok {
+		s = strconv.Itoa(int(value))
+	}
+	return json.Marshal(s)
+}
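+
+// Example (sketch): named values render as JSON strings, unknown values
+// fall back to their decimal form; Foo_name is a hypothetical generated
+// map with 2 mapped to "BAR".
+//
+//	b, _ := MarshalJSONEnum(Foo_name, 2)  // `"BAR"`
+//	b, _ = MarshalJSONEnum(Foo_name, 99)  // `"99"`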
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..3b6ca41
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"sync"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
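+//
+// A sketch of the wire bytes for one item with type_id 12345 and a
+// message payload M of length n:
+//
+//	0b            // tag 1, start group
+//	10 b9 60      // tag 2, varint 12345
+//	1a n M...     // tag 3, length-delimited payload
+//	0c            // tag 1, end group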
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements MarshalMessageSet above, with an option to turn deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var u marshalInfo
+		siz := u.sizeMessageSet(exts)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, exts, deterministic)
+
+	case map[int32]Extension:
+		// This is an old-style extension map.
+		// Wrap it in a new-style XXX_InternalExtensions.
+		ie := XXX_InternalExtensions{
+			p: &struct {
+				mu           sync.Mutex
+				extensionMap map[int32]Extension
+			}{
+				extensionMap: exts,
+			},
+		}
+
+		var u marshalInfo
+		siz := u.sizeMessageSet(&ie)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, &ie, deterministic)
+
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by exts in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var mu sync.Locker
+		m, mu = exts.extensionsRead()
+		if m != nil {
+			// Keep the extensions map locked until we're done marshaling to prevent
+			// races between marshaling and unmarshaling the lazily-{en,de}coded
+			// values.
+			mu.Lock()
+			defer mu.Unlock()
+		}
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+	var b bytes.Buffer
+	b.WriteByte('{')
+
+	// Process the map in key order for deterministic output.
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+	for i, id := range ids {
+		ext := m[id]
+		msd, ok := messageSetMap[id]
+		if !ok {
+			// Unknown type; we can't render it, so skip it.
+			continue
+		}
+
+		if i > 0 && b.Len() > 1 {
+			b.WriteByte(',')
+		}
+
+		fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+		x := ext.value
+		if x == nil {
+			x = reflect.New(msd.t.Elem()).Interface()
+			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+				return nil, err
+			}
+		}
+		d, err := json.Marshal(x)
+		if err != nil {
+			return nil, err
+		}
+		b.Write(d)
+	}
+	b.WriteByte('}')
+	return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+	// Common-case fast path.
+	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+		return nil
+	}
+
+	// This is fairly tricky, and it's not clear that it is needed.
+	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+	t    reflect.Type // pointer to struct
+	name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+	messageSetMap[fieldNum] = messageSetDesc{
+		t:    reflect.TypeOf(m),
+		name: name,
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
new file mode 100644
index 0000000..7ffd3c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
@@ -0,0 +1,59 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+)
+
+// TODO: untested, so probably incorrect.
+
+func (p pointer) getRef() pointer {
+	return pointer{v: p.v.Addr()}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+	slice := p.getSlice(typ)
+	elem := v.asPointerTo(typ).Elem()
+	newSlice := reflect.Append(slice, elem)
+	slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+	sliceTyp := reflect.SliceOf(typ)
+	slice := p.asPointerTo(sliceTyp)
+	slice = slice.Elem()
+	return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	// Super-tricky - read, or take the address of, the data word of the interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	}
+	// The interface is not of pointer type. The data word is the pointer
+	// to the data.
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid, however calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000..aca8eed
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,56 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func (p pointer) getRef() pointer {
+	return pointer{p: (unsafe.Pointer)(&p.p)}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+	slice := p.getSlice(typ)
+	elem := v.asPointerTo(typ).Elem()
+	newSlice := reflect.Append(slice, elem)
+	slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+	sliceTyp := reflect.SliceOf(typ)
+	slice := p.asPointerTo(sliceTyp)
+	slice = slice.Elem()
+	return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000..04dcb8d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,608 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving the per-field properties used to encode and decode
+ * the protocol buffer wire format.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
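+
+// As a worked illustration (values are examples, not library output): each
+// encoded field starts with a varint key of the form tag<<3 | wiretype, so a
+// varint field with tag 1 has key 0x08 (1<<3|0) and a bytes field with tag 2
+// has key 0x12 (2<<3|2).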
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
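+
+// A minimal sketch of the fast/slow split (tag values are illustrative):
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // 3 < tagMapFastLimit: kept in the fastTags slice
+//	tm.put(5000, 1)     // 5000 >= tagMapFastLimit: kept in the slowTags map
+//	fi, ok := tm.get(3) // fi == 0, ok == true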
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default     string // default value
+	HasDefault  bool   // whether an explicit default was provided
+	CustomType  string
+	CastType    string
+	StdTime     bool
+	StdDuration bool
+	WktPointer  bool
+
+	stype reflect.Type      // set for struct types only
+	ctype reflect.Type      // set for custom types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		case strings.HasPrefix(f, "embedded="):
+			p.OrigName = strings.Split(f, "=")[1]
+		case strings.HasPrefix(f, "customtype="):
+			p.CustomType = strings.Split(f, "=")[1]
+		case strings.HasPrefix(f, "casttype="):
+			p.CastType = strings.Split(f, "=")[1]
+		case f == "stdtime":
+			p.StdTime = true
+		case f == "stdduration":
+			p.StdDuration = true
+		case f == "wktptr":
+			p.WktPointer = true
+		}
+	}
+}
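+
+// As a worked example (field name hypothetical), parsing the tag
+// "bytes,5,opt,name=foo,json=fooBar,proto3" yields WireType == WireBytes,
+// Tag == 5, Optional == true, OrigName == "foo", JSONName == "fooBar" and
+// proto3 == true; String() reproduces the same tag. A "def=" clause keeps
+// everything after it, commas included, since defaults are always last.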
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	isMap := typ.Kind() == reflect.Map
+	if len(p.CustomType) > 0 && !isMap {
+		p.ctype = typ
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdTime && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdDuration && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.WktPointer && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	switch t1 := typ; t1.Kind() {
+	case reflect.Struct:
+		p.stype = typ
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+	case reflect.Slice:
+		switch t2 := t1.Elem(); t2.Kind() {
+		case reflect.Ptr:
+			switch t3 := t2.Elem(); t3.Kind() {
+			case reflect.Struct:
+				p.stype = t3
+			}
+		case reflect.Struct:
+			p.stype = t2
+		}
+
+	case reflect.Map:
+
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+
+		p.MapValProp.CustomType = p.CustomType
+		p.MapValProp.StdDuration = p.StdDuration
+		p.MapValProp.StdTime = p.StdTime
+		p.MapValProp.WktPointer = p.WktPointer
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+	p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
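+
+// Typical use from reflection-driven code (MyMessage is hypothetical):
+//
+//	prop := GetProperties(reflect.TypeOf(MyMessage{}))
+//
+// The first call computes and caches the properties; subsequent calls take
+// only the read lock and return the cached *StructProperties.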
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return prop
+	}
+	if collectStats {
+		stats.Cmiss++
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	isOneofMessage := false
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			isOneofMessage = true
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	type oneofMessage interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+		var oots []interface{}
+		_, _, _, oots = om.XXX_OneofFuncs()
+
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+	if _, ok := enumStringMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
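+
+// A minimal sketch of the registration round trip (enum name and values are
+// hypothetical):
+//
+//	RegisterEnum("example.Color", map[int32]string{0: "RED"}, map[string]int32{"RED": 0})
+//	EnumValueMap("example.Color")["RED"] // 0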
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
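+
+// A sketch of the type-registry round trip (names hypothetical):
+//
+//	RegisterType((*MyMessage)(nil), "example.MyMessage")
+//	MessageName(&MyMessage{})        // "example.MyMessage"
+//	MessageType("example.MyMessage") // reflect.Type of *MyMessage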
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000..40ea3dd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,36 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+)
+
+var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
+var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000..5a5fd93
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,119 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"io"
+)
+
+func Skip(data []byte) (n int, err error) {
+	l := len(data)
+	index := 0
+	for index < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if index >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[index]
+			index++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				index++
+				if data[index-1] < 0x80 {
+					break
+				}
+			}
+			return index, nil
+		case 1:
+			index += 8
+			return index, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[index]
+				index++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			index += length
+			return index, nil
+		case 3:
+			for {
+				var innerWire uint64
+				start := index
+				for shift := uint(0); ; shift += 7 {
+					if index >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[index]
+					index++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := Skip(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				index = start + next
+			}
+			return index, nil
+		case 4:
+			return index, nil
+		case 5:
+			index += 4
+			return index, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
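+
+// As a worked example: Skip([]byte{0x08, 0xAC, 0x02}) returns 3, consuming
+// the key 0x08 (tag 1, wire type 0) and the two-byte varint 0xAC 0x02 (300).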
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..ba58c49
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -0,0 +1,3006 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire
+// format); it marshals the field to the end of the slice and returns the
+// slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protects the extElems map and guards initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus an interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not be fully initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should only be called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should only be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the
+// message is not generated), fall back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice,
+// appends the encoded data to the end of the slice, and returns the slice and
+// error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		b = append(b, s...)
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.bytesExtensions = invalidField
+	u.sizecache = invalidField
+	isOneofMessage := false
+
+	if reflect.PtrTo(t).Implements(sizerType) {
+		u.hassizer = true
+	}
+	if reflect.PtrTo(t).Implements(protosizerType) {
+		u.hasprotosizer = true
+	}
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			isOneofMessage = true
+		}
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			if f.Type.Kind() == reflect.Map {
+				u.v1extensions = toField(&f)
+			} else {
+				u.bytesExtensions = toField(&f)
+			}
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	// gogo: isOneofMessage is needed for embedded oneof messages that lack a marshaler and unmarshaler
+	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	sizr, marshalr := typeMarshaler(t, tags, false, false)
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizr,
+		marshaler: marshalr,
+		isptr:     t.Kind() == reflect.Ptr,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// tag has format of "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = 1<<31 - 1 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizr,
+			marshaler: marshalr,
+		}
+	}
+}
+
+type oneofMessage interface {
+	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	ctype := false
+	isTime := false
+	isDuration := false
+	isWktPointer := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+		if strings.HasPrefix(tags[i], "customtype=") {
+			ctype = true
+		}
+		if tags[i] == "stdtime" {
+			isTime = true
+		}
+		if tags[i] == "stdduration" {
+			isDuration = true
+		}
+		if tags[i] == "wktptr" {
+			isWktPointer = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+	if !proto3 && !pointer && !slice {
+		nozero = false
+	}
+
+	if ctype {
+		if reflect.PtrTo(t).Implements(customType) {
+			if slice {
+				return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+			}
+			if pointer {
+				return makeCustomPtrMarshaler(getMarshalInfo(t))
+			}
+			return makeCustomMarshaler(getMarshalInfo(t))
+		} else {
+			panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+		}
+	}
+
+	if isTime {
+		if pointer {
+			if slice {
+				return makeTimePtrSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeTimePtrMarshaler(getMarshalInfo(t))
+		}
+		if slice {
+			return makeTimeSliceMarshaler(getMarshalInfo(t))
+		}
+		return makeTimeMarshaler(getMarshalInfo(t))
+	}
+
+	if isDuration {
+		if pointer {
+			if slice {
+				return makeDurationPtrSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeDurationPtrMarshaler(getMarshalInfo(t))
+		}
+		if slice {
+			return makeDurationSliceMarshaler(getMarshalInfo(t))
+		}
+		return makeDurationMarshaler(getMarshalInfo(t))
+	}
+
+	if isWktPointer {
+		switch t.Kind() {
+		case reflect.Float64:
+			if pointer {
+				if slice {
+					return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdDoubleValueMarshaler(getMarshalInfo(t))
+		case reflect.Float32:
+			if pointer {
+				if slice {
+					return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdFloatValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdFloatValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdFloatValueMarshaler(getMarshalInfo(t))
+		case reflect.Int64:
+			if pointer {
+				if slice {
+					return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdInt64ValueMarshaler(getMarshalInfo(t))
+		case reflect.Uint64:
+			if pointer {
+				if slice {
+					return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdUInt64ValueMarshaler(getMarshalInfo(t))
+		case reflect.Int32:
+			if pointer {
+				if slice {
+					return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdInt32ValueMarshaler(getMarshalInfo(t))
+		case reflect.Uint32:
+			if pointer {
+				if slice {
+					return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdUInt32ValueMarshaler(getMarshalInfo(t))
+		case reflect.Bool:
+			if pointer {
+				if slice {
+					return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdBoolValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdBoolValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdBoolValueMarshaler(getMarshalInfo(t))
+		case reflect.String:
+			if pointer {
+				if slice {
+					return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdStringValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdStringValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdStringValueMarshaler(getMarshalInfo(t))
+		case uint8SliceType:
+			if pointer {
+				if slice {
+					return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdBytesValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdBytesValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdBytesValueMarshaler(getMarshalInfo(t))
+		default:
+			panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+		}
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// Oneof bytes field may also have "proto3" tag.
+			// We want to marshal it as a oneof field. Do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if pointer {
+				if slice {
+					return makeMessageSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeMessageMarshaler(getMarshalInfo(t))
+			} else {
+				if slice {
+					return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeMessageRefMarshaler(getMarshalInfo(t))
+			}
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
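+
+// For instance (tags are illustrative), a plain proto3 int32 field with tag 1
+// arrives here as t.Kind() == reflect.Int32 and tags
+// ["varint", "1", "opt", "name=n", "proto3"]; neither pointer nor slice is
+// set, proto3 keeps nozero true, and the switch returns
+// sizeVarintS32ValueNoZero and appendVarintS32ValueNoZero.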
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info and called through function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
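+
+// Worked example: a packed fixed32 slice of 3 elements with tagsize 1 costs
+// 4*3 = 12 payload bytes, SizeVarint(12) = 1 length byte and 1 tag byte,
+// i.e. 14 bytes in total.
+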
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
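+
+// The zigzag sizers below use the standard protobuf sint encoding,
+// (v << 1) XOR (v >> 31) for 32 bits (>> 63 for 64 bits, an arithmetic
+// shift), which maps 0→0, -1→1, 1→2, -2→3 so that small magnitudes of
+// either sign stay cheap as varints.
+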
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
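+
+// Note that every sizer above returns the full on-wire cost of the field,
+// tag included: tagsize is precomputed by the caller from the field
+// number, and the NoZero variants return 0 so that zero values can be
+// omitted entirely.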
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
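+
+// A worked example of the scheme above: 300 (binary 1_0010_1100) is split
+// into 7-bit groups starting from the least significant, and the high bit
+// of each byte marks continuation, so it encodes to the two bytes
+// 0xAC 0x02.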
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
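+
+// All the packed variants share this wire shape: the field's tag is
+// rewritten to wire type 2 (length-delimited), a byte length follows, and
+// the element encodings are concatenated with no per-element tags.
+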
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
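+
+// The UTF8 variants below still append the raw bytes of an invalid
+// string and only report errInvalidUTF8 afterwards; callers treat the
+// error as non-fatal, so one bad string does not truncate the output.
+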
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
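+
+// Groups carry no length prefix: they are bracketed by start-group and
+// end-group tags instead, which is why the sizer above charges 2*tagsize.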
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
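+
+// Note the nonFatal accumulator: errors the library treats as non-fatal
+// (such as an unset required field) are recorded and marshaling
+// continues, while a fatal error aborts at once, with a nil element
+// reported as errRepeatedHasNil.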
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
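+// On the wire each map entry is encoded like a small message whose key is
+// field 1 and whose value is field 2, with each entry length-prefixed.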
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	stdOptions := false
+	for _, t := range tags {
+		if strings.HasPrefix(t, "customtype=") {
+			valTags = append(valTags, t)
+		}
+		if t == "stdtime" {
+			valTags = append(valTags, t)
+			stdOptions = true
+		}
+		if t == "stdduration" {
+			valTags = append(valTags, t)
+			stdOptions = true
+		}
+		if t == "wktptr" {
+			valTags = append(valTags, t)
+		}
+	}
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is a pointer to
+	// the value. The key can never be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic, so we use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, there is no size cache,
+	// but it cannot be nested either, so valSizer is sufficient.
+	valCachedSizer := valSizer
+	if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)             // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr)          // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// A oneof field is an interface; we need to determine the concrete data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// Not sure this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// In case the wrapper was somehow not generated.
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// In case the wrapper was somehow not generated.
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
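+
+// A rough usage sketch (pb.Example stands in for a hypothetical generated
+// message type, not something defined in this package):
+//
+//	msg := &pb.Example{Name: "x"}
+//	data, err := Marshal(msg) // generated messages take the XXX_Marshal fast path
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = Size(msg) // equals len(data) when marshaling succeeds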
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if p.deterministic {
+		if _, ok := pb.(Marshaler); ok {
+			return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb)
+		}
+	}
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		var b []byte
+		b, err = m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// In case the wrapper was somehow not generated.
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
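+
+// grow follows the usual slice-doubling strategy: capacity at least
+// doubles unless the request is larger, so repeated appends stay
+// amortized O(1) while a Marshal with a known size pays for at most one
+// allocation here.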
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
new file mode 100644
index 0000000..997f57c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
@@ -0,0 +1,388 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler:
+// it marshals a message T instead of a *T.
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			siz := u.size(ptr)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(ptr)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, ptr, deterministic)
+		}
+}
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler:
+// it marshals a slice of messages []T instead of []*T.
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			var err, errreq error
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				b = appendVarint(b, wiretag)
+				siz := u.size(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if err != nil {
+					if _, ok := err.(*RequiredNotSetError); ok {
+						// Required field in submessage is not set.
+						// We record the error but keep going, to give a complete marshaling.
+						if errreq == nil {
+							errreq = err
+						}
+						continue
+					}
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+
+			return b, errreq
+		}
+}
+
+func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+			siz := m.Size()
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+			siz := m.Size()
+			buf, err := m.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(siz))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(u.typ).Interface().(custom)
+			siz := m.Size()
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(u.typ).Interface().(custom)
+			siz := m.Size()
+			buf, err := m.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(siz))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
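+
+// The time and duration marshalers below back gogo's stdtime and
+// stdduration options: native time.Time and time.Duration fields are
+// converted to the well-known Timestamp and Duration messages as they
+// are marshaled.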
+
+func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return 0
+			}
+			siz := Size(ts)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return nil, err
+			}
+			buf, err := Marshal(ts)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return 0
+			}
+			siz := Size(ts)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return nil, err
+			}
+			buf, err := Marshal(ts)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(time.Time)
+				ts, err := timestampProto(t)
+				if err != nil {
+					return 0
+				}
+				siz := Size(ts)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(time.Time)
+				ts, err := timestampProto(t)
+				if err != nil {
+					return nil, err
+				}
+				siz := Size(ts)
+				buf, err := Marshal(ts)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*time.Time)
+				ts, err := timestampProto(*t)
+				if err != nil {
+					return 0
+				}
+				siz := Size(ts)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*time.Time)
+				ts, err := timestampProto(*t)
+				if err != nil {
+					return nil, err
+				}
+				siz := Size(ts)
+				buf, err := Marshal(ts)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+			dur := durationProto(*d)
+			siz := Size(dur)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+			dur := durationProto(*d)
+			buf, err := Marshal(dur)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
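+
+// durationProto likewise splits the time.Duration into a Seconds/Nanos pair;
+// both fields carry the sign of the input. A sketch:
+//
+//	nanos := d.Nanoseconds()
+//	secs := nanos / 1e9
+//	nanos -= secs * 1e9 // remainder keeps the sign of secs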
+
+func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+			dur := durationProto(*d)
+			siz := Size(dur)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+			dur := durationProto(*d)
+			buf, err := Marshal(dur)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(time.Duration)
+				dur := durationProto(d)
+				siz := Size(dur)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(time.Duration)
+				dur := durationProto(d)
+				siz := Size(dur)
+				buf, err := Marshal(dur)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(*time.Duration)
+				dur := durationProto(*d)
+				siz := Size(dur)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(*time.Duration)
+				dur := durationProto(*d)
+				siz := Size(dur)
+				buf, err := Marshal(dur)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
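+
+// The stdtime/stdduration marshalers above are selected when a field carries
+// the corresponding gogoproto extension; a sketch of such fields in a .proto
+// file (assuming the usual gogoproto import):
+//
+//	google.protobuf.Timestamp when = 1 [(gogoproto.stdtime) = true];
+//	google.protobuf.Duration  span = 2 [(gogoproto.stdduration) = true];
+//
+// which generate time.Time and time.Duration Go fields instead of the
+// Timestamp/Duration message types.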
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..f520106
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go
@@ -0,0 +1,657 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
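+
+// A minimal sketch of the public entry point that lands here, assuming a
+// generated message type pb.T with a Name field:
+//
+//	dst := &pb.T{}
+//	src := &pb.T{Name: "x"}
+//	proto.Merge(dst, src) // fields set in src are merged into dst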
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// merge merges src into dst, where dst and src are pointers to the values being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int32{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					mergeInfo.merge(dst, src)
+				}
+			case isSlice: // E.g., []*pb.T
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mergeInfo.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mergeInfo.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..e6b15c7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2245 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
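+
+// Callers normally reach this through the package-level entry point; a
+// minimal sketch, assuming a generated message type pb.T:
+//
+//	msg := &pb.T{}
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		// handle malformed input
+//	}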
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+
+	bytesExtensions field // offset of XXX_extensions with type []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around,
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.bytesExtensions.IsValid() {
+					z = m.offset(u.bytesExtensions).toBytes()
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
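+
+// A worked example of the tag decoding above: the single byte 0x08 is the
+// varint 8, so tag = 8>>3 = 1 and wire = 8&7 = 0 (WireVarint), i.e. field 1
+// encoded as a varint. The two-byte fast path covers any x up to
+// 0x7f | 0x7f<<7 = 16383 before falling back to decodeVarint.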
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+	u.bytesExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) {
+				u.oldExtensions = toField(&f)
+				continue
+			} else if f.Type == reflect.TypeOf(([]byte)(nil)) {
+				u.bytesExtensions = toField(&f)
+				continue
+			}
+			panic("bad type for XXX_extensions field: " + f.Type.Name())
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if fn.IsValid() && len(oneofFields) > 0 {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
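+
+// For reference, the struct tags parsed above follow the generated-code
+// convention; a sketch of one generated proto3 field:
+//
+//	Id int32 `protobuf:"varint,1,opt,name=id,proto3"`
+//
+// tagArray[0] is the wire encoding ("varint"), tagArray[1] the field number,
+// tagArray[2] the cardinality ("opt", "req" or "rep"), and the remaining
+// entries hold options such as name= and proto3.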
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
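+
+// For example, on a message with n = 5 fields, any tag below 16 lands in the
+// dense slice and is dispatched by a plain index, while an outlier such as
+// tag 100000 goes into the sparse map.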
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	ctype := false
+	isTime := false
+	isDuration := false
+	isWktPointer := false
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+		if strings.HasPrefix(tag, "customtype=") {
+			ctype = true
+		}
+		if tag == "stdtime" {
+			isTime = true
+		}
+		if tag == "stdduration" {
+			isDuration = true
+		}
+		if tag == "wktptr" {
+			isWktPointer = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	if ctype {
+		if reflect.PtrTo(t).Implements(customType) {
+			if slice {
+				return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name)
+			}
+			if pointer {
+				return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalCustom(getUnmarshalInfo(t), name)
+		} else {
+			panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+		}
+	}
+
+	if isTime {
+		if pointer {
+			if slice {
+				return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalTimePtr(getUnmarshalInfo(t), name)
+		}
+		if slice {
+			return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name)
+		}
+		return makeUnmarshalTime(getUnmarshalInfo(t), name)
+	}
+
+	if isDuration {
+		if pointer {
+			if slice {
+				return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name)
+		}
+		if slice {
+			return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name)
+		}
+		return makeUnmarshalDuration(getUnmarshalInfo(t), name)
+	}
+
+	if isWktPointer {
+		switch t.Kind() {
+		case reflect.Float64:
+			if pointer {
+				if slice {
+					return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Float32:
+			if pointer {
+				if slice {
+					return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Int64:
+			if pointer {
+				if slice {
+					return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Uint64:
+			if pointer {
+				if slice {
+					return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Int32:
+			if pointer {
+				if slice {
+					return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Uint32:
+			if pointer {
+				if slice {
+					return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Bool:
+			if pointer {
+				if slice {
+					return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.String:
+			if pointer {
+				if slice {
+					return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name)
+		case uint8SliceType:
+			if pointer {
+				if slice {
+					return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name)
+		default:
+			panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+		}
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			switch encoding {
+			case "bytes":
+				if slice {
+					return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name)
+				}
+				return makeUnmarshalMessage(getUnmarshalInfo(t), name)
+			}
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
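+
+// A worked example of the packed branch above: a repeated varint field with
+// number 4 holding the values {3, 270} arrives as
+//
+//	0x22      // tag: 4<<3 | WireBytes
+//	0x03      // payload length: 3 bytes
+//	0x03      // varint 3
+//	0x8e 0x02 // varint 270
+//
+// and the inner loop then peels one varint at a time off the 3-byte payload.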
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
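+
+// The expression above undoes zigzag encoding: int64(x)<<63>>63 sign-extends
+// the low bit of x into a mask of all zeros or all ones, so the result is
+// x>>1 XORed with that mask. For example, x = 0, 1, 2, 3, 4 decode to
+// 0, -1, 1, -2, 2.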
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
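+
+// The shift-and-or above is simply a little-endian load, equivalent to
+// binary.LittleEndian.Uint64(b[:8]); e.g. the bytes
+// 01 00 00 00 00 00 00 00 decode to 1.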
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	tagArray := strings.Split(f.Tag.Get("protobuf"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	for _, t := range tagArray {
+		if strings.HasPrefix(t, "customtype=") {
+			valTags = append(valTags, t)
+		}
+		if t == "stdtime" {
+			valTags = append(valTags, t)
+		}
+		if t == "stdduration" {
+			valTags = append(valTags, t)
+		}
+		if t == "wktptr" {
+			valTags = append(valTags, t)
+		}
+	}
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ","))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
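+		// Each map entry is encoded like a message with the key in
+		// field 1 and the value in field 2.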
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// For example, given:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// errInternalBadWireType is an error used internally by decode.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field with the given wire type and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
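+// Group contents carry no length prefix, so the scan walks the buffer
+// tag by tag, adjusting depth as nested groups start and end.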
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
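+// For example, encodeVarint(nil, 300) appends [0xac, 0x02]: seven bits per
+// byte, least-significant group first, high bit set on all but the last byte.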
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
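+// For example, [0x96, 0x01] decodes to (150, 2): 0x96&0x7f = 22, plus 1<<7 = 128.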
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) <= 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
new file mode 100644
index 0000000..00d6c7a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
@@ -0,0 +1,385 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
+
+func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f // gogo: changed from v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
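+// makeUnmarshalCustomPtr returns an unmarshaler for a customtype field held
+// by pointer: it allocates a fresh value and delegates decoding to the
+// type's own Unmarshal method.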
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
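+// makeUnmarshalTime returns an unmarshaler for a stdtime field: the wire
+// bytes are decoded as a google.protobuf.Timestamp and converted to the
+// time.Time stored in the field. The Ptr and Slice variants below follow
+// the same pattern.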
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
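+// The duration unmarshalers below mirror the timestamp ones, decoding a
+// google.protobuf.Duration into a time.Duration (stdduration).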
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000..0407ba8
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,928 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value in sv can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if len(props.Enum) > 0 {
+					if err := tm.writeEnum(w, v, props); err != nil {
+						return err
+					}
+				} else if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking on them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		if len(props.Enum) > 0 {
+			if err := tm.writeEnum(w, fv, props); err != nil {
+				return err
+			}
+		} else if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv
+	if pv.CanAddr() {
+		pv = sv.Addr()
+	} else {
+		pv = reflect.New(sv.Type())
+		pv.Elem().Set(sv)
+	}
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if len(props.CastType) > 0 {
+			if _, ok := v.Interface().(interface {
+				String() string
+			}); ok {
+				switch v.Kind() {
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					_, err := fmt.Fprintf(w, "%d", v.Interface())
+					return err
+				}
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
+			if err != nil {
+				return err
+			}
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface())
+			}
+			dproto := durationProto(d)
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+			return err
+		}
+	}
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// isprint reports whether c is an ASCII printable character, equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
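+// For example, a 0x07 byte is written as \007 rather than Go's \a.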
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+			return ferr
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, werr := w.Write(endBraceNewline); werr != nil {
+				return werr
+			}
+			continue
+		}
+		if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+			return ferr
+		}
+		if wire != WireStartGroup {
+			if err = w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err = w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	e := pv.Interface().(Message)
+
+	var m map[int32]Extension
+	var mu sync.Locker
+	if em, ok := e.(extensionsBytes); ok {
+		eb := em.GetExtensions()
+		var err error
+		m, err = BytesToExtensionsMap(*eb)
+		if err != nil {
+			return err
+		}
+		mu = notLocker{}
+	} else if _, ok := e.(extendableProto); ok {
+		ep, _ := extendable(e)
+		m, mu = ep.extensionsRead()
+		if m == nil {
+			return nil
+		}
+	}
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(e, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
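+
+// Illustrative usage (a sketch, not part of the upstream file; pb stands for
+// any generated Message value):
+//
+//	var buf bytes.Buffer
+//	if err := MarshalText(&buf, pb); err != nil {
+//		// only write errors from buf surface here
+//	}
+//	oneLine := CompactTextString(pb) // same content on a single line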
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000..1d6c6aa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,57 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		// No string map registered for this enum; fall back to the numeric form.
+		// Returning here avoids writing the value a second time below.
+		return tm.writeAny(w, v, props)
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		// Unknown enum value; fall back to the numeric form.
+		return tm.writeAny(w, v, props)
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
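+
+// Illustrative behavior (enum and value names assumed): for an enum whose
+// string map was registered via RegisterEnum, a known value renders by name,
+// while a value missing from the map falls back to writeAny's numeric form:
+//
+//	state: ACTIVE   // 2 found in enumStringMaps[props.Enum]
+//	state: 7        // unknown value, written numerically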
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..1ce0be2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1018 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// anyRepeatedlyUnpacked is the error format used when an expanded Any is
+// unpacked more than once, or when a field it would set is already set.
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
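+
+// For example (illustrative messages): a failure at byte offset 12 of the
+// first line reads "line 1.12: unmatched quote", while one on a later line
+// reads "line 3: unknown field name ...".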
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
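+
+// Illustrative sketch (assumed input): advancing over `foo: "bar"` yields
+// three tokens in turn:
+//
+//	foo       identifier
+//	:         single symbol
+//	"bar"     quoted string, with cur.unquoted set to bar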
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(rune(i)), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
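+
+// Illustrative examples (assumed inputs): unquoteC resolves the escapes
+// recognized by unescape above, so for double-quoted bodies:
+//
+//	unquoteC(`a\tb`, '"')      // "a<TAB>b", nil
+//	unquoteC(`\x41\101`, '"')  // "AA", nil (hex and octal escapes)
+//	unquoteC(`\q`, '"')        // "", unknown escape error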
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
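+
+// Illustrative note (field name assumed): adjacent quoted strings are
+// concatenated, mirroring the C++ text format, so the input
+//
+//	name: "foo" "bar"
+//
+// yields a single token whose unquoted value is "foobar".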
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
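+
+// Illustrative examples (field names assumed): the colon is mandatory for
+// scalar fields but optional before a message or group body:
+//
+//	count: 3          // scalar, colon required
+//	inner { id: 1 }   // message, colon may be omitted
+//	inner: { id: 1 }  // equally valid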
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
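+
+// Illustrative input (field and type names assumed) showing the shapes
+// readStruct accepts: plain fields, map entries, and extensions:
+//
+//	name: "x"
+//	labels <key: "env" value: "prod">
+//	[my.pkg.ext_field]: 7
+//
+// and, when sv is a google.protobuf.Any, the expanded form
+//
+//	[type.googleapis.com/my.pkg.Inner] { id: 1 }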
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
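+
+// Illustrative examples (names assumed): consumeExtName accepts both
+// bracketed forms below, the first as an extension name and the second as an
+// Any type URL (told apart later by the presence of '/'):
+//
+//	[my.pkg.ext_field]
+//	[type.googleapis.com/my.pkg.Inner]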
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+	if len(props.CustomType) > 0 {
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				tc := reflect.TypeOf(new(Marshaler))
+				ok := t.Elem().Implements(tc.Elem())
+				if ok {
+					fv := v
+					flen := fv.Len()
+					if flen == fv.Cap() {
+						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+						reflect.Copy(nav, fv)
+						fv.Set(nav)
+					}
+					fv.SetLen(flen + 1)
+
+					// Read one.
+					p.back()
+					return p.readAny(fv.Index(flen), props)
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.ValueOf(custom))
+		} else {
+			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+		}
+		return nil
+	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				ntok := p.next()
+				if ntok.err != nil {
+					return ntok.err
+				}
+				if ntok.value == "]" {
+					break
+				}
+				if ntok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", ntok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int8:
+		if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+	case reflect.Int16:
+		if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint8:
+		if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	case reflect.Uint16:
+		if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
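+
+// Illustrative note (field name assumed): a repeated scalar field accepts
+// either list notation or one entry per occurrence, so these two inputs are
+// equivalent:
+//
+//	nums: [1, 2, 3]
+//	nums: 1 nums: 2 nums: 3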
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
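+
+// Illustrative usage (message type assumed, a sketch only):
+//
+//	var msg pb.Config
+//	if err := UnmarshalText(`name: "x" count: 3`, &msg); err != nil {
+//		// a *RequiredNotSetError means parsing succeeded except for
+//		// missing required fields
+//	}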
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
new file mode 100644
index 0000000..9324f65
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go
@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
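+
+// Illustrative checks (values assumed): &timestamp{Seconds: 0, Nanos: 0}
+// (1970-01-01) is valid, while &timestamp{Nanos: -1} and
+// &timestamp{Seconds: maxValidSeconds} are both rejected.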
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first
+// return value is not the zero time.Time. Instead, it is the value obtained
+// from the time.Unix function when passed the contents of the Timestamp, in
+// the UTC locale. This may or may not be a meaningful time; many invalid
+// Timestamps do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// timestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
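+
+// Illustrative round trip (a sketch only):
+//
+//	ts, err := timestampProto(time.Now()) // time.Time -> *timestamp
+//	if err == nil {
+//		t, _ := timestampFromProto(ts) // *timestamp -> time.Time in UTC
+//		_ = t
+//	}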
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..38439fa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp<string>" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..b175d1b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go
@@ -0,0 +1,1888 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
+
+func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*float64)
+			v := &float64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*float64)
+			v := &float64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
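+
+// Illustrative wire layout (a sketch; outer field number assumed to be 1):
+// each wrapped value is emitted as a length-delimited submessage, i.e. the
+// field's wiretag, a varint length, then the marshaled wrapper body. For a
+// DoubleValue holding 1.0:
+//
+//	0a 09                        outer tag (field 1, wire type 2), length 9
+//	09 00 00 00 00 00 00 f0 3f   inner fixed64 field 1 = 1.0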
+
+func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+			v := &float64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+			v := &float64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float64)
+				v := &float64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float64)
+				v := &float64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float64)
+				v := &float64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float64)
+				v := &float64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
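+
+// Illustrative decode (bytes assumed, matching the layout shown above): with
+// the outer tag already consumed, the unmarshaler receives
+//
+//	09  09 00 00 00 00 00 00 f0 3f
+//
+// decodeVarint reads the length 9, Unmarshal parses the following nine bytes
+// into float64Value{Value: 1.0}, and the result is stored through f.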
+
+func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*float32)
+			v := &float32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*float32)
+			v := &float32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+			v := &float32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+			v := &float32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float32)
+				v := &float32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float32)
+				v := &float32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float32)
+				v := &float32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float32)
+				v := &float32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*int64)
+			v := &int64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*int64)
+			v := &int64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+			v := &int64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+			v := &int64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int64)
+				v := &int64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int64)
+				v := &int64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int64)
+				v := &int64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int64)
+				v := &int64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+			v := &uint64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+			v := &uint64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+			v := &uint64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+			v := &uint64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint64)
+				v := &uint64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint64)
+				v := &uint64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint64)
+				v := &uint64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint64)
+				v := &uint64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*int32)
+			v := &int32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*int32)
+			v := &int32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+			v := &int32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+			v := &int32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int32)
+				v := &int32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int32)
+				v := &int32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int32)
+				v := &int32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int32)
+				v := &int32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+			v := &uint32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+			v := &uint32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+			v := &uint32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+			v := &uint32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint32)
+				v := &uint32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint32)
+				v := &uint32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint32)
+				v := &uint32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint32)
+				v := &uint32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*bool)
+			v := &boolValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*bool)
+			v := &boolValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+			v := &boolValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+			v := &boolValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(bool)
+				v := &boolValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(bool)
+				v := &boolValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*bool)
+				v := &boolValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*bool)
+				v := &boolValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*string)
+			v := &stringValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*string)
+			v := &stringValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+			v := &stringValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+			v := &stringValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(string)
+				v := &stringValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(string)
+				v := &stringValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*string)
+				v := &stringValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*string)
+				v := &stringValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+			v := &bytesValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+			v := &bytesValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+			v := &bytesValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+			v := &bytesValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().([]byte)
+				v := &bytesValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().([]byte)
+				v := &bytesValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*[]byte)
+				v := &bytesValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*[]byte)
+				v := &bytesValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
new file mode 100644
index 0000000..c1cf7bf
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
@@ -0,0 +1,113 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
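+// The types below are unexported mirrors of the google.protobuf wrapper
+// messages (DoubleValue, FloatValue, Int64Value, ...): each carries a single
+// Value field with the same field number and wire encoding as its well-known
+// counterpart, so the two forms marshal and unmarshal interchangeably.
+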
+type float64Value struct {
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float64Value) Reset()       { *m = float64Value{} }
+func (*float64Value) ProtoMessage()  {}
+func (*float64Value) String() string { return "float64<string>" }
+
+type float32Value struct {
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float32Value) Reset()       { *m = float32Value{} }
+func (*float32Value) ProtoMessage()  {}
+func (*float32Value) String() string { return "float32<string>" }
+
+type int64Value struct {
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int64Value) Reset()       { *m = int64Value{} }
+func (*int64Value) ProtoMessage()  {}
+func (*int64Value) String() string { return "int64<string>" }
+
+type uint64Value struct {
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint64Value) Reset()       { *m = uint64Value{} }
+func (*uint64Value) ProtoMessage()  {}
+func (*uint64Value) String() string { return "uint64<string>" }
+
+type int32Value struct {
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int32Value) Reset()       { *m = int32Value{} }
+func (*int32Value) ProtoMessage()  {}
+func (*int32Value) String() string { return "int32<string>" }
+
+type uint32Value struct {
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint32Value) Reset()       { *m = uint32Value{} }
+func (*uint32Value) ProtoMessage()  {}
+func (*uint32Value) String() string { return "uint32<string>" }
+
+type boolValue struct {
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *boolValue) Reset()       { *m = boolValue{} }
+func (*boolValue) ProtoMessage()  {}
+func (*boolValue) String() string { return "bool<string>" }
+
+type stringValue struct {
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *stringValue) Reset()       { *m = stringValue{} }
+func (*stringValue) ProtoMessage()  {}
+func (*stringValue) String() string { return "string<string>" }
+
+type bytesValue struct {
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *bytesValue) Reset()       { *m = bytesValue{} }
+func (*bytesValue) ProtoMessage()  {}
+func (*bytesValue) String() string { return "[]byte<string>" }
+
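+// init registers each wrapper mirror under a gogo-specific message name so
+// the proto runtime can resolve these internal types by name.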
+func init() {
+	RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
+	RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
+	RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
+	RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
+	RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
+	RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
+	RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
+	RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
+	RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
new file mode 100644
index 0000000..3496dc9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
@@ -0,0 +1,36 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
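+# Regeneration sketch: assumes protoc is installed, that `go install` puts
+# protoc-gen-gogo and protoc-gen-gostring on PATH, and that the proto sources
+# are checked out at ../../protobuf as referenced below.
+#
+#	make regenerate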
+regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
+	go install github.com/gogo/protobuf/protoc-gen-gostring
+	protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
+	protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
new file mode 100644
index 0000000..a85bf19
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
@@ -0,0 +1,118 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
+func extractFile(gz []byte) (*FileDescriptorProto, error) {
+	r, err := gzip.NewReader(bytes.NewReader(gz))
+	if err != nil {
+		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
+	}
+	defer r.Close()
+
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
+	}
+
+	fd := new(FileDescriptorProto)
+	if err := proto.Unmarshal(b, fd); err != nil {
+		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
+	}
+
+	return fd, nil
+}
+
+// Message is a proto.Message with a method to return its descriptor.
+//
+// Message types generated by the protocol compiler always satisfy
+// the Message interface.
+type Message interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// ForMessage returns the FileDescriptorProto for the file that defines the
+// given message, together with the DescriptorProto within it that describes
+// the message itself.
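+//
+// A usage sketch (MyMessage stands in for any generated message type):
+//
+//	fd, md := descriptor.ForMessage(&MyMessage{})
+//	fmt.Println(fd.GetName(), md.GetName())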
+func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
+	gz, path := msg.Descriptor()
+	fd, err := extractFile(gz)
+	if err != nil {
+		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
+	}
+
+	md = fd.MessageType[path[0]]
+	for _, i := range path[1:] {
+		md = md.NestedType[i]
+	}
+	return fd, md
+}
+
+// IsScalar reports whether the field is a scalar numeric type.
+func (field *FieldDescriptorProto) IsScalar() bool {
+	if field.Type == nil {
+		return false
+	}
+	switch *field.Type {
+	case FieldDescriptorProto_TYPE_DOUBLE,
+		FieldDescriptorProto_TYPE_FLOAT,
+		FieldDescriptorProto_TYPE_INT64,
+		FieldDescriptorProto_TYPE_UINT64,
+		FieldDescriptorProto_TYPE_INT32,
+		FieldDescriptorProto_TYPE_FIXED64,
+		FieldDescriptorProto_TYPE_FIXED32,
+		FieldDescriptorProto_TYPE_BOOL,
+		FieldDescriptorProto_TYPE_UINT32,
+		FieldDescriptorProto_TYPE_ENUM,
+		FieldDescriptorProto_TYPE_SFIXED32,
+		FieldDescriptorProto_TYPE_SFIXED64,
+		FieldDescriptorProto_TYPE_SINT32,
+		FieldDescriptorProto_TYPE_SINT64:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
new file mode 100644
index 0000000..ec6eb16
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
@@ -0,0 +1,744 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: descriptor.proto
+
+package descriptor
+
+import fmt "fmt"
+import strings "strings"
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+import sort "sort"
+import strconv "strconv"
+import reflect "reflect"
+import proto "github.com/gogo/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (this *FileDescriptorSet) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.FileDescriptorSet{")
+	if this.File != nil {
+		s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FileDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 16)
+	s = append(s, "&descriptor.FileDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Package != nil {
+		s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
+	}
+	if this.Dependency != nil {
+		s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
+	}
+	if this.PublicDependency != nil {
+		s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
+	}
+	if this.WeakDependency != nil {
+		s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
+	}
+	if this.MessageType != nil {
+		s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
+	}
+	if this.EnumType != nil {
+		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+	}
+	if this.Service != nil {
+		s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+	}
+	if this.Extension != nil {
+		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.SourceCodeInfo != nil {
+		s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
+	}
+	if this.Syntax != nil {
+		s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *DescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 14)
+	s = append(s, "&descriptor.DescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Field != nil {
+		s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
+	}
+	if this.Extension != nil {
+		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+	}
+	if this.NestedType != nil {
+		s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
+	}
+	if this.EnumType != nil {
+		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+	}
+	if this.ExtensionRange != nil {
+		s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
+	}
+	if this.OneofDecl != nil {
+		s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.ReservedRange != nil {
+		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
+	}
+	if this.ReservedName != nil {
+		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *DescriptorProto_ExtensionRange) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
+	if this.Start != nil {
+		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *DescriptorProto_ReservedRange) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
+	if this.Start != nil {
+		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ExtensionRangeOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.ExtensionRangeOptions{")
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FieldDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 14)
+	s = append(s, "&descriptor.FieldDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Number != nil {
+		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+	}
+	if this.Label != nil {
+		s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
+	}
+	if this.Type != nil {
+		s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
+	}
+	if this.TypeName != nil {
+		s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
+	}
+	if this.Extendee != nil {
+		s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
+	}
+	if this.DefaultValue != nil {
+		s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
+	}
+	if this.OneofIndex != nil {
+		s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
+	}
+	if this.JsonName != nil {
+		s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *OneofDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.OneofDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 9)
+	s = append(s, "&descriptor.EnumDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Value != nil {
+		s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.ReservedRange != nil {
+		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
+	}
+	if this.ReservedName != nil {
+		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
+	if this.Start != nil {
+		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumValueDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.EnumValueDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Number != nil {
+		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ServiceDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.ServiceDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Method != nil {
+		s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MethodDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 10)
+	s = append(s, "&descriptor.MethodDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.InputType != nil {
+		s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
+	}
+	if this.OutputType != nil {
+		s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.ClientStreaming != nil {
+		s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
+	}
+	if this.ServerStreaming != nil {
+		s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FileOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 23)
+	s = append(s, "&descriptor.FileOptions{")
+	if this.JavaPackage != nil {
+		s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
+	}
+	if this.JavaOuterClassname != nil {
+		s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
+	}
+	if this.JavaMultipleFiles != nil {
+		s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
+	}
+	if this.JavaGenerateEqualsAndHash != nil {
+		s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
+	}
+	if this.JavaStringCheckUtf8 != nil {
+		s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
+	}
+	if this.OptimizeFor != nil {
+		s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
+	}
+	if this.GoPackage != nil {
+		s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
+	}
+	if this.CcGenericServices != nil {
+		s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
+	}
+	if this.JavaGenericServices != nil {
+		s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
+	}
+	if this.PyGenericServices != nil {
+		s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
+	}
+	if this.PhpGenericServices != nil {
+		s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.CcEnableArenas != nil {
+		s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
+	}
+	if this.ObjcClassPrefix != nil {
+		s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
+	}
+	if this.CsharpNamespace != nil {
+		s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
+	}
+	if this.SwiftPrefix != nil {
+		s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
+	}
+	if this.PhpClassPrefix != nil {
+		s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
+	}
+	if this.PhpNamespace != nil {
+		s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MessageOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 9)
+	s = append(s, "&descriptor.MessageOptions{")
+	if this.MessageSetWireFormat != nil {
+		s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
+	}
+	if this.NoStandardDescriptorAccessor != nil {
+		s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.MapEntry != nil {
+		s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FieldOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 11)
+	s = append(s, "&descriptor.FieldOptions{")
+	if this.Ctype != nil {
+		s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
+	}
+	if this.Packed != nil {
+		s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
+	}
+	if this.Jstype != nil {
+		s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
+	}
+	if this.Lazy != nil {
+		s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.Weak != nil {
+		s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *OneofOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.OneofOptions{")
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.EnumOptions{")
+	if this.AllowAlias != nil {
+		s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumValueOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.EnumValueOptions{")
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ServiceOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.ServiceOptions{")
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MethodOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.MethodOptions{")
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.IdempotencyLevel != nil {
+		s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *UninterpretedOption) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 11)
+	s = append(s, "&descriptor.UninterpretedOption{")
+	if this.Name != nil {
+		s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+	}
+	if this.IdentifierValue != nil {
+		s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
+	}
+	if this.PositiveIntValue != nil {
+		s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
+	}
+	if this.NegativeIntValue != nil {
+		s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
+	}
+	if this.DoubleValue != nil {
+		s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
+	}
+	if this.StringValue != nil {
+		s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
+	}
+	if this.AggregateValue != nil {
+		s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *UninterpretedOption_NamePart) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.UninterpretedOption_NamePart{")
+	if this.NamePart != nil {
+		s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
+	}
+	if this.IsExtension != nil {
+		s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *SourceCodeInfo) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.SourceCodeInfo{")
+	if this.Location != nil {
+		s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *SourceCodeInfo_Location) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 9)
+	s = append(s, "&descriptor.SourceCodeInfo_Location{")
+	if this.Path != nil {
+		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+	}
+	if this.Span != nil {
+		s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
+	}
+	if this.LeadingComments != nil {
+		s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
+	}
+	if this.TrailingComments != nil {
+		s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
+	}
+	if this.LeadingDetachedComments != nil {
+		s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *GeneratedCodeInfo) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.GeneratedCodeInfo{")
+	if this.Annotation != nil {
+		s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *GeneratedCodeInfo_Annotation) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 8)
+	s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
+	if this.Path != nil {
+		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+	}
+	if this.SourceFile != nil {
+		s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
+	}
+	if this.Begin != nil {
+		s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringDescriptor(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
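+// For example, a *string holding "pkg" is rendered by the format string
+// above as `func(v string) *string { return &v } ( "pkg" )`, a valid Go
+// expression that yields a pointer to the value.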
+func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
+	if e == nil {
+		return "nil"
+	}
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
+	keys := make([]int, 0, len(e))
+	for k := range e {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+	ss := []string{}
+	for _, k := range keys {
+		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+	}
+	s += strings.Join(ss, ",") + "})"
+	return s
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
new file mode 100644
index 0000000..e0846a3
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
@@ -0,0 +1,390 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package descriptor
+
+import (
+	"strings"
+)
+
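+// GetMapFields returns the key and value field descriptors of a map entry
+// message, or nil, nil when the message is not a map entry.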
+func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
+	if !msg.GetOptions().GetMapEntry() {
+		return nil, nil
+	}
+	return msg.GetField()[0], msg.GetField()[1]
+}
+
+func dotToUnderscore(r rune) rune {
+	if r == '.' {
+		return '_'
+	}
+	return r
+}
+
+func (field *FieldDescriptorProto) WireType() (wire int) {
+	switch *field.Type {
+	case FieldDescriptorProto_TYPE_DOUBLE:
+		return 1
+	case FieldDescriptorProto_TYPE_FLOAT:
+		return 5
+	case FieldDescriptorProto_TYPE_INT64:
+		return 0
+	case FieldDescriptorProto_TYPE_UINT64:
+		return 0
+	case FieldDescriptorProto_TYPE_INT32:
+		return 0
+	case FieldDescriptorProto_TYPE_UINT32:
+		return 0
+	case FieldDescriptorProto_TYPE_FIXED64:
+		return 1
+	case FieldDescriptorProto_TYPE_FIXED32:
+		return 5
+	case FieldDescriptorProto_TYPE_BOOL:
+		return 0
+	case FieldDescriptorProto_TYPE_STRING:
+		return 2
+	case FieldDescriptorProto_TYPE_GROUP:
+		return 2
+	case FieldDescriptorProto_TYPE_MESSAGE:
+		return 2
+	case FieldDescriptorProto_TYPE_BYTES:
+		return 2
+	case FieldDescriptorProto_TYPE_ENUM:
+		return 0
+	case FieldDescriptorProto_TYPE_SFIXED32:
+		return 5
+	case FieldDescriptorProto_TYPE_SFIXED64:
+		return 1
+	case FieldDescriptorProto_TYPE_SINT32:
+		return 0
+	case FieldDescriptorProto_TYPE_SINT64:
+		return 0
+	}
+	panic("unreachable")
+}
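+
+// (The case values above are the protobuf wire types: 0 = varint,
+// 1 = 64-bit fixed, 2 = length-delimited, 5 = 32-bit fixed.)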
+
+func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
+	packed := field.IsPacked()
+	wireType := field.WireType()
+	fieldNumber := field.GetNumber()
+	if packed {
+		wireType = 2
+	}
+	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
+	return x
+}
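+
+// For example, a string field (wire type 2) with field number 1 yields
+// 1<<3|2 = 0x0a, the tag byte that opens many serialized messages.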
+
+func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
+	packed := field.IsPacked3()
+	wireType := field.WireType()
+	fieldNumber := field.GetNumber()
+	if packed {
+		wireType = 2
+	}
+	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
+	return x
+}
+
+func (field *FieldDescriptorProto) GetKey() []byte {
+	x := field.GetKeyUint64()
+	keybuf := make([]byte, 0)
+	for x > 127 {
+		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
+		x >>= 7
+	}
+	keybuf = append(keybuf, uint8(x))
+	return keybuf
+}
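+
+// For example, field number 16 with wire type 0 gives x = 16<<3 = 128,
+// which the varint loop above encodes as the two bytes 0x80 0x01.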
+
+func (field *FieldDescriptorProto) GetKey3() []byte {
+	x := field.GetKey3Uint64()
+	keybuf := make([]byte, 0)
+	for x > 127 {
+		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
+		x >>= 7
+	}
+	keybuf = append(keybuf, uint8(x))
+	return keybuf
+}
+
+func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
+	msg := desc.GetMessage(packageName, messageName)
+	if msg == nil {
+		return nil
+	}
+	for _, field := range msg.GetField() {
+		if field.GetName() == fieldName {
+			return field
+		}
+	}
+	return nil
+}
+
+func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
+	for _, msg := range file.GetMessageType() {
+		if msg.GetName() == typeName {
+			return msg
+		}
+		nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
+		if nes != nil {
+			return nes
+		}
+	}
+	return nil
+}
+
+func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
+	for _, nes := range msg.GetNestedType() {
+		if nes.GetName() == typeName {
+			return nes
+		}
+		res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
+		if res != nil {
+			return res
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, msg := range file.GetMessageType() {
+			if msg.GetName() == typeName {
+				return msg
+			}
+		}
+		for _, msg := range file.GetMessageType() {
+			for _, nes := range msg.GetNestedType() {
+				if nes.GetName() == typeName {
+					return nes
+				}
+				if msg.GetName()+"."+nes.GetName() == typeName {
+					return nes
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, msg := range file.GetMessageType() {
+			if msg.GetName() == typeName {
+				return file.GetSyntax() == "proto3"
+			}
+		}
+		for _, msg := range file.GetMessageType() {
+			for _, nes := range msg.GetNestedType() {
+				if nes.GetName() == typeName {
+					return file.GetSyntax() == "proto3"
+				}
+				if msg.GetName()+"."+nes.GetName() == typeName {
+					return file.GetSyntax() == "proto3"
+				}
+			}
+		}
+	}
+	return false
+}
+
+func (msg *DescriptorProto) IsExtendable() bool {
+	return len(msg.GetExtensionRange()) > 0
+}
+
+func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", nil
+	}
+	if !parent.IsExtendable() {
+		return "", nil
+	}
+	extendee := "." + packageName + "." + typeName
+	for _, file := range desc.GetFile() {
+		for _, ext := range file.GetExtension() {
+			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+					continue
+				}
+			} else {
+				if ext.GetExtendee() != extendee {
+					continue
+				}
+			}
+			if ext.GetName() == fieldName {
+				return file.GetPackage(), ext
+			}
+		}
+	}
+	return "", nil
+}
+
+func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", nil
+	}
+	if !parent.IsExtendable() {
+		return "", nil
+	}
+	extendee := "." + packageName + "." + typeName
+	for _, file := range desc.GetFile() {
+		for _, ext := range file.GetExtension() {
+			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+					continue
+				}
+			} else {
+				if ext.GetExtendee() != extendee {
+					continue
+				}
+			}
+			if ext.GetNumber() == fieldNum {
+				return file.GetPackage(), ext
+			}
+		}
+	}
+	return "", nil
+}
+
+func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", ""
+	}
+	field := parent.GetFieldDescriptor(fieldName)
+	if field == nil {
+		var extPackageName string
+		extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
+		if field == nil {
+			return "", ""
+		}
+		packageName = extPackageName
+	}
+	typeNames := strings.Split(field.GetTypeName(), ".")
+	if len(typeNames) == 1 {
+		msg := desc.GetMessage(packageName, typeName)
+		if msg == nil {
+			return "", ""
+		}
+		return packageName, msg.GetName()
+	}
+	if len(typeNames) > 2 {
+		for i := 1; i < len(typeNames)-1; i++ {
+			packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
+			typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
+			msg := desc.GetMessage(packageName, typeName)
+			if msg != nil {
+				typeNames := strings.Split(msg.GetName(), ".")
+				if len(typeNames) == 1 {
+					return packageName, msg.GetName()
+				}
+				return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
+			}
+		}
+	}
+	return "", ""
+}
+
+func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
+	for _, field := range msg.GetField() {
+		if field.GetName() == fieldName {
+			return field
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, enum := range file.GetEnumType() {
+			if enum.GetName() == typeName {
+				return enum
+			}
+		}
+	}
+	return nil
+}
+
+func (f *FieldDescriptorProto) IsEnum() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_ENUM
+}
+
+func (f *FieldDescriptorProto) IsMessage() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
+}
+
+func (f *FieldDescriptorProto) IsBytes() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_BYTES
+}
+
+func (f *FieldDescriptorProto) IsRepeated() bool {
+	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
+}
+
+func (f *FieldDescriptorProto) IsString() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_STRING
+}
+
+func (f *FieldDescriptorProto) IsBool() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_BOOL
+}
+
+func (f *FieldDescriptorProto) IsRequired() bool {
+	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
+}
+
+func (f *FieldDescriptorProto) IsPacked() bool {
+	return f.Options != nil && f.GetOptions().GetPacked()
+}
+
+func (f *FieldDescriptorProto) IsPacked3() bool {
+	if f.IsRepeated() && f.IsScalar() {
+		if f.Options == nil || f.GetOptions().Packed == nil {
+			return true
+		}
+		return f.GetOptions().GetPacked()
+	}
+	return false
+}
+
+func (m *DescriptorProto) HasExtension() bool {
+	return len(m.ExtensionRange) > 0
+}
diff --git a/vendor/github.com/golang-collections/go-datastructures/LICENSE b/vendor/github.com/golang-collections/go-datastructures/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/vendor/github.com/golang-collections/go-datastructures/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/error.go b/vendor/github.com/golang-collections/go-datastructures/queue/error.go
new file mode 100644
index 0000000..29c062c
--- /dev/null
+++ b/vendor/github.com/golang-collections/go-datastructures/queue/error.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2014 Workiva, LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package queue
+
+import "errors"
+
+var disposedError = errors.New(`Queue has been disposed.`)
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/priority_queue.go b/vendor/github.com/golang-collections/go-datastructures/queue/priority_queue.go
new file mode 100644
index 0000000..3ccfd57
--- /dev/null
+++ b/vendor/github.com/golang-collections/go-datastructures/queue/priority_queue.go
@@ -0,0 +1,235 @@
+/*
+Copyright 2014 Workiva, LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+The priority queue is almost a spitting image of the logic
+used for a regular queue.  In order to keep the logic fast,
+this code is repeated instead of casting to and from
+interface{}.  If Go had inheritance and generics, this problem
+would be easier to solve.
+*/
+package queue
+
+import (
+	"sort"
+	"sync"
+)
+
+// Item is an item that can be added to the priority queue.
+type Item interface {
+	// Compare returns an int that determines ordering in the
+	// priority queue.  Assuming the queue is in ascending order,
+	// this should use greater-than logic: return 1 to indicate
+	// this item is greater than the other, 0 to indicate
+	// equality, and -1 to indicate it is less than the other.
+	Compare(other Item) int
+}
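+
+// The type below is an illustrative sketch, not part of the original file:
+// a minimal Item ordering plain ints ascending, showing the -1/0/1 contract
+// that Compare documents.  It assumes other is also an intItem.
+type intItem int
+
+// Compare implements Item using ordinary integer ordering.
+func (a intItem) Compare(other Item) int {
+	b := other.(intItem)
+	switch {
+	case a < b:
+		return -1
+	case a > b:
+		return 1
+	}
+	return 0
+}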
+
+type priorityItems []Item
+
+func (items *priorityItems) get(number int) []Item {
+	returnItems := make([]Item, 0, number)
+	index := 0
+	for i := 0; i < number; i++ {
+		if i >= len(*items) {
+			break
+		}
+
+		returnItems = append(returnItems, (*items)[i])
+		(*items)[i] = nil
+		index++
+	}
+
+	*items = (*items)[index:]
+	return returnItems
+}
+
+func (items *priorityItems) insert(item Item) {
+	if len(*items) == 0 {
+		*items = append(*items, item)
+		return
+	}
+
+	equalFound := false
+	i := sort.Search(len(*items), func(i int) bool {
+		result := (*items)[i].Compare(item)
+		if result == 0 {
+			equalFound = true
+		}
+		return result >= 0
+	})
+
+	if equalFound {
+		return
+	}
+
+	if i == len(*items) {
+		*items = append(*items, item)
+		return
+	}
+
+	*items = append(*items, nil)
+	copy((*items)[i+1:], (*items)[i:])
+	(*items)[i] = item
+}
+
+// PriorityQueue is similar to queue except that it takes
+// items that implement the Item interface and adds them
+// to the queue in priority order.
+type PriorityQueue struct {
+	waiters     waiters
+	items       priorityItems
+	lock        sync.Mutex
+	disposeLock sync.Mutex
+	disposed    bool
+}
+
+// Put adds items to the queue.
+func (pq *PriorityQueue) Put(items ...Item) error {
+	if len(items) == 0 {
+		return nil
+	}
+
+	pq.lock.Lock()
+	if pq.disposed {
+		pq.lock.Unlock()
+		return disposedError
+	}
+
+	for _, item := range items {
+		pq.items.insert(item)
+	}
+
+	for {
+		sema := pq.waiters.get()
+		if sema == nil {
+			break
+		}
+
+		sema.response.Add(1)
+		sema.wg.Done()
+		sema.response.Wait()
+		if len(pq.items) == 0 {
+			break
+		}
+	}
+
+	pq.lock.Unlock()
+	return nil
+}
+
+// Get retrieves items from the queue.  If the queue is empty,
+// this call blocks until the next item is added to the queue.  It
+// will return up to number items.
+func (pq *PriorityQueue) Get(number int) ([]Item, error) {
+	if number < 1 {
+		return nil, nil
+	}
+
+	pq.lock.Lock()
+
+	if pq.disposed {
+		pq.lock.Unlock()
+		return nil, disposedError
+	}
+
+	var items []Item
+
+	if len(pq.items) == 0 {
+		sema := newSema()
+		pq.waiters.put(sema)
+		sema.wg.Add(1)
+		pq.lock.Unlock()
+
+		sema.wg.Wait()
+		pq.disposeLock.Lock()
+		if pq.disposed {
+			pq.disposeLock.Unlock()
+			return nil, disposedError
+		}
+		pq.disposeLock.Unlock()
+
+		items = pq.items.get(number)
+		sema.response.Done()
+		return items, nil
+	}
+
+	items = pq.items.get(number)
+	pq.lock.Unlock()
+	return items, nil
+}
+
+// Peek will look at the next item without removing it from the queue.
+func (pq *PriorityQueue) Peek() Item {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+	if len(pq.items) > 0 {
+		return pq.items[0]
+	}
+	return nil
+}
+
+// Empty returns true if there are no items left
+// in the queue.
+func (pq *PriorityQueue) Empty() bool {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	return len(pq.items) == 0
+}
+
+// Len returns a number indicating how many items are in the queue.
+func (pq *PriorityQueue) Len() int {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	return len(pq.items)
+}
+
+// Disposed returns a bool indicating if this queue has been disposed.
+func (pq *PriorityQueue) Disposed() bool {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	return pq.disposed
+}
+
+// Dispose will prevent any further reads/writes to this queue
+// and frees available resources.
+func (pq *PriorityQueue) Dispose() {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	pq.disposeLock.Lock()
+	defer pq.disposeLock.Unlock()
+
+	pq.disposed = true
+	for _, waiter := range pq.waiters {
+		waiter.response.Add(1)
+		waiter.wg.Done()
+	}
+
+	pq.items = nil
+	pq.waiters = nil
+}
+
+// NewPriorityQueue is the constructor for a priority queue.
+func NewPriorityQueue(hint int) *PriorityQueue {
+	return &PriorityQueue{
+		items: make(priorityItems, 0, hint),
+	}
+}
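+
+// Usage sketch (illustrative, using the intItem example above):
+//
+//	pq := NewPriorityQueue(8)
+//	_ = pq.Put(intItem(3), intItem(1), intItem(2))
+//	items, _ := pq.Get(2) // items is [1 2]; the lowest Compare order comes out first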
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/queue.go b/vendor/github.com/golang-collections/go-datastructures/queue/queue.go
new file mode 100644
index 0000000..856ae3e
--- /dev/null
+++ b/vendor/github.com/golang-collections/go-datastructures/queue/queue.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2014 Workiva, LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package queue includes a regular queue and a priority queue.
+These queues rely on waitgroups to pause listening threads
+on empty queues until a message is received.  If any thread
+calls Dispose on the queue, any listeners are immediately returned
+with an error.  Any subsequent put to the queue will return an error
+as opposed to panicking as with channels.  Queues will grow with unbounded
+behavior as opposed to channels which can be buffered but will pause
+while a thread attempts to put to a full channel.
+
+Recently added is a lockless ring buffer using the same basic C design as
+found here:
+
+http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
+
+Modified for use with Go with the addition of some dispose semantics providing
+the capability to release blocked threads.  This works for both puts
+and gets, either will return an error if they are blocked and the buffer
+is disposed.  This could serve as a signal to kill a goroutine.  All threadsafety
+is achieved using CAS operations, making this buffer pretty quick.
+
+Benchmarks:
+BenchmarkPriorityQueue-8          2000000       782 ns/op
+BenchmarkQueue-8                  2000000       671 ns/op
+BenchmarkChannel-8                1000000      2083 ns/op
+BenchmarkQueuePut-8                 20000     84299 ns/op
+BenchmarkQueueGet-8                 20000     80753 ns/op
+BenchmarkExecuteInParallel-8        20000     68891 ns/op
+BenchmarkRBLifeCycle-8           10000000       177 ns/op
+BenchmarkRBPut-8                 30000000      58.1 ns/op
+BenchmarkRBGet-8                 50000000      26.8 ns/op
+
+TODO: We really need a Fibonacci heap for the priority queue.
+TODO: Unify the types of queue to the same interface.
+*/
+package queue
+
+import (
+	"runtime"
+	"sync"
+	"sync/atomic"
+)
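+
+// Usage sketch (illustrative): Get blocks on an empty queue until a Put
+// arrives, then returns up to the requested number of items.
+//
+//	q := New(10)
+//	go func() { _ = q.Put("a", "b") }()
+//	items, err := q.Get(2) // blocks until the Put, then returns both items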
+
+type waiters []*sema
+
+func (w *waiters) get() *sema {
+	if len(*w) == 0 {
+		return nil
+	}
+
+	sema := (*w)[0]
+	copy((*w)[0:], (*w)[1:])
+	(*w)[len(*w)-1] = nil // or the zero value of T
+	*w = (*w)[:len(*w)-1]
+	return sema
+}
+
+func (w *waiters) put(sema *sema) {
+	*w = append(*w, sema)
+}
+
+type items []interface{}
+
+func (items *items) get(number int64) []interface{} {
+	returnItems := make([]interface{}, 0, number)
+	index := int64(0)
+	for i := int64(0); i < number; i++ {
+		if i >= int64(len(*items)) {
+			break
+		}
+
+		returnItems = append(returnItems, (*items)[i])
+		(*items)[i] = nil
+		index++
+	}
+
+	*items = (*items)[index:]
+	return returnItems
+}
+
+func (items *items) getUntil(checker func(item interface{}) bool) []interface{} {
+	length := len(*items)
+
+	if len(*items) == 0 {
+		// returning nil here actually wraps that nil in a list
+		// of interfaces... thanks go
+		return []interface{}{}
+	}
+
+	returnItems := make([]interface{}, 0, length)
+	index := 0
+	for i, item := range *items {
+		if !checker(item) {
+			break
+		}
+
+		returnItems = append(returnItems, item)
+		index = i + 1 // advance past the matched item so it is not also left in the queue
+	}
+
+	*items = (*items)[index:]
+	return returnItems
+}
+
+type sema struct {
+	wg       *sync.WaitGroup
+	response *sync.WaitGroup
+}
+
+func newSema() *sema {
+	return &sema{
+		wg:       &sync.WaitGroup{},
+		response: &sync.WaitGroup{},
+	}
+}
+
+// Queue is the struct responsible for tracking the state
+// of the queue.
+type Queue struct {
+	waiters  waiters
+	items    items
+	lock     sync.Mutex
+	disposed bool
+}
+
+// Put will add the specified items to the queue.
+func (q *Queue) Put(items ...interface{}) error {
+	if len(items) == 0 {
+		return nil
+	}
+
+	q.lock.Lock()
+
+	if q.disposed {
+		q.lock.Unlock()
+		return disposedError
+	}
+
+	q.items = append(q.items, items...)
+	for {
+		sema := q.waiters.get()
+		if sema == nil {
+			break
+		}
+		sema.response.Add(1)
+		sema.wg.Done()
+		sema.response.Wait()
+		if len(q.items) == 0 {
+			break
+		}
+	}
+
+	q.lock.Unlock()
+	return nil
+}
+
+// Get retrieves items from the queue.  If there are some items in the
+// queue, Get will return a number UP TO the number passed in as a
+// parameter.  If no items are in the queue, this method will pause
+// until items are added to the queue.
+func (q *Queue) Get(number int64) ([]interface{}, error) {
+	if number < 1 {
+		// thanks again go
+		return []interface{}{}, nil
+	}
+
+	q.lock.Lock()
+
+	if q.disposed {
+		q.lock.Unlock()
+		return nil, disposedError
+	}
+
+	var items []interface{}
+
+	if len(q.items) == 0 {
+		sema := newSema()
+		q.waiters.put(sema)
+		sema.wg.Add(1)
+		q.lock.Unlock()
+
+		sema.wg.Wait()
+		// we are now inside the put's lock
+		if q.disposed {
+			return nil, disposedError
+		}
+		items = q.items.get(number)
+		sema.response.Done()
+		return items, nil
+	}
+
+	items = q.items.get(number)
+	q.lock.Unlock()
+	return items, nil
+}
+
+// TakeUntil takes a function and returns a list of items that
+// match the checker until the checker returns false.  This does not
+// wait if there are no items in the queue.
+func (q *Queue) TakeUntil(checker func(item interface{}) bool) ([]interface{}, error) {
+	if checker == nil {
+		return nil, nil
+	}
+
+	q.lock.Lock()
+
+	if q.disposed {
+		q.lock.Unlock()
+		return nil, disposedError
+	}
+
+	result := q.items.getUntil(checker)
+	q.lock.Unlock()
+	return result, nil
+}
+
+// Empty returns a bool indicating if this queue is empty.
+func (q *Queue) Empty() bool {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	return len(q.items) == 0
+}
+
+// Len returns the number of items in this queue.
+func (q *Queue) Len() int64 {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	return int64(len(q.items))
+}
+
+// Disposed returns a bool indicating if this queue
+// has had disposed called on it.
+func (q *Queue) Disposed() bool {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	return q.disposed
+}
+
+// Dispose will dispose of this queue.  Any subsequent
+// calls to Get or Put will return an error.
+func (q *Queue) Dispose() {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	q.disposed = true
+	for _, waiter := range q.waiters {
+		waiter.response.Add(1)
+		waiter.wg.Done()
+	}
+
+	q.items = nil
+	q.waiters = nil
+}
+
+// New is a constructor for a new threadsafe queue.
+func New(hint int64) *Queue {
+	return &Queue{
+		items: make([]interface{}, 0, hint),
+	}
+}
+
+// ExecuteInParallel will (in parallel) call the provided function
+// with each item in the queue until the queue is exhausted.  When the queue
+// is exhausted, execution is complete and all goroutines exit.
+// The queue is then disposed, so it cannot be used again.
+func ExecuteInParallel(q *Queue, fn func(interface{})) {
+	if q == nil {
+		return
+	}
+
+	q.lock.Lock() // so no one touches anything in the middle of this process
+	todo, done := uint64(len(q.items)), int64(-1)
+	// this check is important or we might face an infinite loop
+	if todo == 0 {
+		q.lock.Unlock() // release the lock before returning, or the queue stays locked forever
+		return
+	}
+
+	numCPU := 1
+	if runtime.NumCPU() > 1 {
+		numCPU = runtime.NumCPU() - 1
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(numCPU)
+	items := q.items
+
+	for i := 0; i < numCPU; i++ {
+		go func() {
+			for {
+				index := atomic.AddInt64(&done, 1)
+				if index >= int64(todo) {
+					wg.Done()
+					break
+				}
+
+				fn(items[index])
+				items[index] = nil // release the reference so it can be collected
+			}
+		}()
+	}
+	wg.Wait()
+	q.lock.Unlock()
+	q.Dispose()
+}
diff --git a/vendor/github.com/golang-collections/go-datastructures/queue/ring.go b/vendor/github.com/golang-collections/go-datastructures/queue/ring.go
new file mode 100644
index 0000000..9c137a9
--- /dev/null
+++ b/vendor/github.com/golang-collections/go-datastructures/queue/ring.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2014 Workiva, LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package queue
+
+import (
+	"runtime"
+	"sync/atomic"
+)
+
+// roundUp takes a uint64 greater than 0 and rounds it up to the next
+// power of 2.
+func roundUp(v uint64) uint64 {
+	v--
+	v |= v >> 1
+	v |= v >> 2
+	v |= v >> 4
+	v |= v >> 8
+	v |= v >> 16
+	v |= v >> 32
+	v++
+	return v
+}
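+
+// For example, roundUp(6) == 8 and roundUp(8) == 8.  Keeping the size a
+// power of two lets the mask below replace a modulo with a single
+// bitwise AND (pos & rb.mask).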
+
+type node struct {
+	position uint64
+	data     interface{}
+}
+
+type nodes []*node
+
+// RingBuffer is a MPMC buffer that achieves threadsafety with CAS operations
+// only.  A put on full or get on empty call will block until an item
+// is put or retrieved.  Calling Dispose on the RingBuffer will unblock
+// any blocked threads with an error.  This buffer is similar to the buffer
+// described here: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
+// with some minor additions.
+type RingBuffer struct {
+	nodes                          nodes
+	queue, dequeue, mask, disposed uint64
+}
+
+func (rb *RingBuffer) init(size uint64) {
+	size = roundUp(size)
+	rb.nodes = make(nodes, size)
+	for i := uint64(0); i < size; i++ {
+		rb.nodes[i] = &node{position: i}
+	}
+	rb.mask = size - 1 // so we don't have to do this with every put/get operation
+}
+
+// Put adds the provided item to the queue.  If the queue is full, this
+// call will block until an item is removed from the queue or Dispose is called
+// on the queue.  An error will be returned if the queue is disposed.
+func (rb *RingBuffer) Put(item interface{}) error {
+	var n *node
+	pos := atomic.LoadUint64(&rb.queue)
+L:
+	for {
+		if atomic.LoadUint64(&rb.disposed) == 1 {
+			return disposedError
+		}
+
+		n = rb.nodes[pos&rb.mask]
+		seq := atomic.LoadUint64(&n.position)
+		switch dif := int64(seq) - int64(pos); { // signed so the dif < 0 branch is reachable
+		case dif == 0:
+			if atomic.CompareAndSwapUint64(&rb.queue, pos, pos+1) {
+				break L
+			}
+		case dif < 0:
+			panic(`Ring buffer in a compromised state during a put operation.`)
+		default:
+			pos = atomic.LoadUint64(&rb.queue)
+		}
+		runtime.Gosched() // free up the cpu before the next iteration
+	}
+
+	n.data = item
+	atomic.StoreUint64(&n.position, pos+1)
+	return nil
+}
+
+// Get will return the next item in the queue.  This call will block
+// if the queue is empty.  This call will unblock when an item is added
+// to the queue or Dispose is called on the queue.  An error will be returned
+// if the queue is disposed.
+func (rb *RingBuffer) Get() (interface{}, error) {
+	var n *node
+	pos := atomic.LoadUint64(&rb.dequeue)
+L:
+	for {
+		if atomic.LoadUint64(&rb.disposed) == 1 {
+			return nil, disposedError
+		}
+
+		n = rb.nodes[pos&rb.mask]
+		seq := atomic.LoadUint64(&n.position)
+		switch dif := int64(seq) - int64(pos+1); { // signed so the dif < 0 branch is reachable
+		case dif == 0:
+			if atomic.CompareAndSwapUint64(&rb.dequeue, pos, pos+1) {
+				break L
+			}
+		case dif < 0:
+			panic(`Ring buffer in compromised state during a get operation.`)
+		default:
+			pos = atomic.LoadUint64(&rb.dequeue)
+		}
+		runtime.Gosched() // free up cpu before next iteration
+	}
+	data := n.data
+	n.data = nil
+	atomic.StoreUint64(&n.position, pos+rb.mask+1)
+	return data, nil
+}
+
+// Len returns the number of items in the queue.
+func (rb *RingBuffer) Len() uint64 {
+	return atomic.LoadUint64(&rb.queue) - atomic.LoadUint64(&rb.dequeue)
+}
+
+// Cap returns the capacity of this ring buffer.
+func (rb *RingBuffer) Cap() uint64 {
+	return uint64(len(rb.nodes))
+}
+
+// Dispose will dispose of this queue and free any blocked threads
+// in the Put and/or Get methods.  Calling those methods on a disposed
+// queue will return an error.
+func (rb *RingBuffer) Dispose() {
+	atomic.CompareAndSwapUint64(&rb.disposed, 0, 1)
+}
+
+// IsDisposed will return a bool indicating if this queue has been
+// disposed.
+func (rb *RingBuffer) IsDisposed() bool {
+	return atomic.LoadUint64(&rb.disposed) == 1
+}
+
+// NewRingBuffer will allocate, initialize, and return a ring buffer
+// with the specified size.
+func NewRingBuffer(size uint64) *RingBuffer {
+	rb := &RingBuffer{}
+	rb.init(size)
+	return rb
+}
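+
+// Usage sketch (illustrative):
+//
+//	rb := NewRingBuffer(3) // capacity rounds up to 4
+//	_ = rb.Put("x")        // blocks only when the buffer is full
+//	v, _ := rb.Get()       // v == "x"
+//	rb.Dispose()           // unblocks any waiting Put/Get with an error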
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
new file mode 100644
index 0000000..ac7e51b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
@@ -0,0 +1,93 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
+func extractFile(gz []byte) (*protobuf.FileDescriptorProto, error) {
+	r, err := gzip.NewReader(bytes.NewReader(gz))
+	if err != nil {
+		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
+	}
+	defer r.Close()
+
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
+	}
+
+	fd := new(protobuf.FileDescriptorProto)
+	if err := proto.Unmarshal(b, fd); err != nil {
+		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
+	}
+
+	return fd, nil
+}
+
+// Message is a proto.Message with a method to return its descriptor.
+//
+// Message types generated by the protocol compiler always satisfy
+// the Message interface.
+type Message interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
+// describing the given message.
+func ForMessage(msg Message) (fd *protobuf.FileDescriptorProto, md *protobuf.DescriptorProto) {
+	gz, path := msg.Descriptor()
+	fd, err := extractFile(gz)
+	if err != nil {
+		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
+	}
+
+	md = fd.MessageType[path[0]]
+	for _, i := range path[1:] {
+		md = md.NestedType[i]
+	}
+	return fd, md
+}
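A quick, hedged sketch of ForMessage; durpb.Duration is only a stand-in for any protoc-gen-go generated message, all of which satisfy the Message interface above.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/descriptor"
	// Example generated type; any protoc-gen-go output works here.
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// ForMessage unpacks the gzip'd file descriptor embedded in the
	// generated code and returns both the file and message descriptors.
	fd, md := descriptor.ForMessage(&durpb.Duration{})
	fmt.Println(fd.GetName()) // "google/protobuf/duration.proto"
	fmt.Println(md.GetName()) // "Duration"
}
```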
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
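To make the Clone/Merge semantics above concrete (notably that proto3 zero-valued scalars are skipped during a merge), a small sketch; durpb.Duration is again just an assumed example of a generated message.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration" // example generated type
)

func main() {
	src := &durpb.Duration{Seconds: 90, Nanos: 500}

	// Clone returns an independent deep copy.
	dst := proto.Clone(src).(*durpb.Duration)
	dst.Seconds = 120
	fmt.Println(src.Seconds, dst.Seconds) // 90 120

	// Merge copies set fields from its source into dst. Seconds is the
	// proto3 zero value in the source, so it is left alone; Nanos is set.
	proto.Merge(dst, &durpb.Duration{Nanos: 999})
	fmt.Println(dst.Seconds, dst.Nanos) // 120 999
}
```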
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..d9aa3c4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,428 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from  the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
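A short sketch of the varint and zigzag codecs defined in this file, paired with the matching encoders from the same package; the byte values follow directly from the 7-bits-per-byte rule described above.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 needs two 7-bit groups: 0x2C with the continuation bit set
	// (0xAC), then 0x02.
	buf := proto.EncodeVarint(300)
	fmt.Printf("% x\n", buf) // ac 02

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	// Zigzag maps small negative numbers to small unsigned ones, keeping
	// the varint short: -3 becomes 5 on the wire.
	b := proto.NewBuffer(nil)
	v := int64(-3)
	if err := b.EncodeZigzag64(uint64(v)); err != nil {
		panic(err)
	}
	z, err := proto.NewBuffer(b.Bytes()).DecodeZigzag64()
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(z)) // -3
}
```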
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							di.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						di.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
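A hedged sketch of DiscardUnknown in action. The two raw bytes encode field number 15 as a varint, a field that durpb.Duration (used here only as an example generated type) does not define, so it is kept as an unknown field until discarded.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration" // example generated type
)

func main() {
	msg := &durpb.Duration{}
	// 0x78 is the tag for field 15 with wire type 0 (varint); 0x01 is its value.
	if err := proto.Unmarshal([]byte{0x78, 0x01}, msg); err != nil {
		panic(err)
	}

	before, _ := proto.Marshal(msg) // the unknown field is re-emitted
	proto.DiscardUnknown(msg)
	after, _ := proto.Marshal(msg) // the unknown field is gone

	fmt.Println(len(before), len(after)) // 2 0
}
```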
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
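To make SizeVarint's thresholds concrete, a small sketch: every additional 7 bits of magnitude costs one more byte, and the fixed-width encoders always emit little-endian bytes.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 127 fits in one 7-bit group; 128 spills into a second byte, and so on.
	for _, x := range []uint64{127, 128, 16383, 16384} {
		fmt.Println(x, proto.SizeVarint(x), len(proto.EncodeVarint(x)))
	}
	// Output: 127 1 1 / 128 2 2 / 16383 2 2 / 16384 3 3

	b := proto.NewBuffer(nil)
	if err := b.EncodeFixed32(1); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b.Bytes()) // 01 00 00 00
}
```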
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+//    are equal, and extension sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+//  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1, m2 := e1.value, e2.value
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// If both have only encoded form and the bytes are the same,
+			// it is handled above. We get here when the bytes are different.
+			// We don't know how to decode it, so just compare them as byte
+			// slices.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
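A brief sketch of the Equal rules spelled out above, again using durpb.Duration purely as an assumed example of a generated message.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration" // example generated type
)

func main() {
	a := &durpb.Duration{Seconds: 1}
	b := &durpb.Duration{Seconds: 1}
	fmt.Println(proto.Equal(a, b)) // true: same type, equal fields

	b.Nanos = 1
	fmt.Println(proto.Equal(a, b)) // false: corresponding fields differ

	// Two nil pointers of the same concrete type compare equal.
	fmt.Println(proto.Equal((*durpb.Duration)(nil), (*durpb.Duration)(nil))) // true
}
```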
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..816a3b9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,543 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
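+
+// A minimal sketch of the intended read pattern (fieldNum is a hypothetical
+// extension field number, not part of this file):
+//
+//	if m, mu := e.extensionsRead(); m != nil {
+//		mu.Lock()
+//		_, ok := m[fieldNum]
+//		mu.Unlock()
+//		_ = ok
+//	}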
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc  *ExtensionDesc
+	value interface{}
+	enc   []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if err := checkExtensionTypes(epb, extension); err != nil {
+			return nil, err
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a pointer; we can return sf.value directly.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value
+		// directly, set it as an int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
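+
+// A usage sketch (E_Foo and E_Bar are hypothetical generated descriptors):
+//
+//	vals, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{E_Foo, E_Bar})
+//	// On success, vals[i] is nil wherever the extension is absent.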
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return errors.New("proto: bad extension value type")
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
+}
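+
+// A round-trip sketch, assuming msg is a value of a hypothetical generated
+// message type with extension ranges and E_Foo is its *string extension:
+//
+//	if err := proto.SetExtension(msg, E_Foo, proto.String("bar")); err != nil {
+//		// handle err
+//	}
+//	if proto.HasExtension(msg, E_Foo) {
+//		v, _ := proto.GetExtension(msg, E_Foo) // v.(*string)
+//		_ = v
+//	}
+//	proto.ClearExtension(msg, E_Foo)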
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
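+
+// Generated code typically registers descriptors from an init function,
+// e.g. (E_Foo is a hypothetical descriptor):
+//
+//	func init() {
+//		proto.RegisterExtension(E_Foo)
+//	}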
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..75565cc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,979 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return "proto: required field not set"
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether err was absorbed:
+// it returns true for nil and non-fatal errors (storing the first
+// non-fatal error in nf.E) and false for any fatal error.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
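+
+// A sketch of the intended accumulation pattern inside an encoder loop
+// (fields and marshalField are hypothetical):
+//
+//	var nf nonFatal
+//	for _, f := range fields {
+//		if err := marshalField(f); !nf.Merge(err) {
+//			return err // fatal error: stop immediately
+//		}
+//	}
+//	return nf.E // nil, or the first non-fatal error seen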
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders.  Useful for tuning the library itself.
+type Stats struct {
+	Emalloc uint64 // mallocs in encode
+	Dmalloc uint64 // mallocs in decode
+	Encode  uint64 // number of encodes
+	Decode  uint64 // number of decodes
+	Chit    uint64 // number of cache hits
+	Cmiss   uint64 // number of cache misses
+	Size    uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
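+
+// A minimal usage sketch (msg is any generated Message):
+//
+//	var buf proto.Buffer
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(msg); err != nil {
+//		// handle err
+//	}
+//	data := buf.Bytes()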
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
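+
+// Using the FOO enum from the package documentation above, both JSON
+// representations decode to the same value:
+//
+//	v1, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic
+//	v2, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // numeric
+//	// v1 == v2 == 17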
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
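+
+// For example, a buffer holding field 1 as varint 150 ({0x08, 0x96, 0x01})
+// prints (with s == "header"):
+//
+//	--- header ---
+//	  0: t=  1 varint 150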
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
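+
+// Using the Test message from the package documentation, a sketch:
+//
+//	msg := &pb.Test{Label: proto.String("x")} // Type is unset
+//	proto.SetDefaults(msg)
+//	// *msg.Type is now Default_Test_Type (77)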
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage,
+	// which records the scalar fields that have proto-declared defaults
+	// and the indices of its nested message fields.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
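+
+// A sketch of how map fields are serialized deterministically (mv is a
+// reflect.Value of map kind):
+//
+//	keys := mv.MapKeys()
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		// encode mv.MapIndex(k) ...
+//	}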
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..3b6ca41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,314 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"sync"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
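+
+// For example, 300 is encoded as {0xac, 0x02}: the first byte has the
+// continuation bit (0x80) set and the second does not, so skipVarint
+// returns the slice starting just after 0x02.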
+
+// MarshalMessageSet encodes the extension map represented by exts in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var u marshalInfo
+		siz := u.sizeMessageSet(exts)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, exts, deterministic)
+
+	case map[int32]Extension:
+		// This is an old-style extension map.
+		// Wrap it in a new-style XXX_InternalExtensions.
+		ie := XXX_InternalExtensions{
+			p: &struct {
+				mu           sync.Mutex
+				extensionMap map[int32]Extension
+			}{
+				extensionMap: exts,
+			},
+		}
+
+		var u marshalInfo
+		siz := u.sizeMessageSet(&ie)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, &ie, deterministic)
+
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by exts in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var mu sync.Locker
+		m, mu = exts.extensionsRead()
+		if m != nil {
+			// Keep the extensions map locked until we're done marshaling to prevent
+			// races between marshaling and unmarshaling the lazily-{en,de}coded
+			// values.
+			mu.Lock()
+			defer mu.Unlock()
+		}
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+	var b bytes.Buffer
+	b.WriteByte('{')
+
+	// Process the map in key order for deterministic output.
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+	for i, id := range ids {
+		ext := m[id]
+		msd, ok := messageSetMap[id]
+		if !ok {
+			// Unknown type; we can't render it, so skip it.
+			continue
+		}
+
+		if i > 0 && b.Len() > 1 {
+			b.WriteByte(',')
+		}
+
+		fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+		x := ext.value
+		if x == nil {
+			x = reflect.New(msd.t.Elem()).Interface()
+			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+				return nil, err
+			}
+		}
+		d, err := json.Marshal(x)
+		if err != nil {
+			return nil, err
+		}
+		b.Write(d)
+	}
+	b.WriteByte('}')
+	return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+	// Common-case fast path.
+	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+		return nil
+	}
+
+	// This is fairly tricky, and it's not clear that it is needed.
+	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+	t    reflect.Type // pointer to struct
+	name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+	messageSetMap[fieldNum] = messageSetDesc{
+		t:    reflect.TypeOf(m),
+		name: name,
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
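+
+// Note: getInt32Slice and setInt32Slice above copy the data, so the caller's
+// slice and the struct field never alias; the unsafe implementation aliases
+// instead, as its matching comments in pointer_unsafe.go point out.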
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
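+
+// Note (assumption about intent): guarding these load/store helpers with the
+// single atomicLock mutex below is a simple way to get the memory-ordering
+// guarantees that pointer_unsafe.go gets from sync/atomic; this reflect-based
+// build is the slow path anyway, so the extra contention is acceptable.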
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
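+
+// Layout note: in the gc runtime an interface value is two words, a type word
+// followed by a data word, which is why casting to *[2]unsafe.Pointer and
+// taking element [1] extracts the data pointer here and in toAddrPointer.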
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	}
+	// The interface is not of pointer type. The data word is the pointer
+	// to the data.
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid, however calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..50b99b8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,544 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
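+
+// For example (illustrative): put(3, 7) grows fastTags to [-1, -1, -1, 7], so
+// get(3) is a bounds check plus an index, while a tag such as 100000 is at or
+// above tagMapFastLimit and falls through to the slowTags map.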
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+
+	stype reflect.Type      // set for struct types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
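+
+// For example (illustrative): a required varint field with tag 1, whose
+// OrigName and JSONName are both "id", renders as "varint,1,req,name=id".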
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		}
+	}
+}
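+
+// For example (illustrative): Parse("bytes,12,opt,name=greeting,def=hi,there")
+// sets Wire to "bytes", Tag to 12, Optional to true, and OrigName to
+// "greeting"; because commas in defaults are not escaped, the tail is
+// rejoined so Default becomes "hi,there".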
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	switch t1 := typ; t1.Kind() {
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+
+	case reflect.Slice:
+		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+			p.stype = t2.Elem()
+		}
+
+	case reflect.Map:
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return prop
+	}
+	if collectStats {
+		stats.Cmiss++
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	type oneofMessage interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+		var oots []interface{}
+		_, _, _, oots = om.XXX_OneofFuncs()
+
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
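+
+// For example (illustrative): after generated code has called
+//	RegisterEnum("example.Color", nil, map[string]int32{"RED": 0, "BLUE": 1})
+// EnumValueMap("example.Color")["BLUE"] yields 1, and a lookup for an
+// unregistered enum type yields a nil map.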
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..b167944
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2767 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the
+// size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format);
+// it marshals the field to the end of the slice and returns the slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
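+
+// Design note: caching the *marshalInfo inside InternalMessageInfo means the
+// map lookup in getMarshalInfo is paid roughly once per message type per call
+// site; the atomic load/store pair keeps that per-site cache safe for
+// concurrent marshalers.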
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice and returns the slice and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.sizecache = invalidField
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			u.v1extensions = toField(&f)
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
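+
+// Design note: size and marshal use a double-checked scheme -- an atomic read
+// of u.initialized on the fast path, with computeMarshalInfo taking the lock
+// and re-checking before doing the one-time work.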
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizer,
+		marshaler: marshaler,
+		isptr:     t.Kind() == reflect.Ptr,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// tag has format of "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = 1<<31 - 1 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizer,
+			marshaler: marshaler,
+		}
+	}
+}
+
+type oneofMessage interface {
+	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
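+
+// For example (illustrative): a varint field with tag 3 gets
+// wiretag 3<<3|0 = 0x18, which encodes as the single byte 0x18 on the wire,
+// so tagsize is 1.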
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// A oneof bytes field may also carry the "proto3" tag.
+			// We want to marshal it as a oneof field; do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if slice {
+				return makeMessageSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeMessageMarshaler(getMarshalInfo(t))
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info and called via function pointers.
+// They have type sizer or marshaler.
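+//
+// For illustration (these pairs are produced by typeMarshaler above): an
+// optional uint32 field resolves to (sizeVarint32Ptr, appendVarint32Ptr),
+// while the same field marked repeated and packed resolves to
+// (sizeVarint32PackedSlice, appendVarint32PackedSlice).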
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
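+// Packed sizers such as sizeFixed32PackedSlice below count the payload
+// bytes, one length varint, and a single tag: a packed fixed32 slice of
+// length n takes 4*n + SizeVarint(uint64(4*n)) + tagsize bytes.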
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
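+// The zigzag transform used below maps signed values to unsigned ones so
+// that values of small magnitude stay small on the wire:
+// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, and so on. It is computed as
+// (v<<1)^(v>>31) for 32-bit values and (v<<1)^(v>>63) for 64-bit values,
+// using arithmetic shifts.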
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
+
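+// Worked example (illustrative): appendVarint(b, 300) appends 0xAC 0x02,
+// since 300 = 0b1_0010_1100 is emitted as its low 7 bits with the
+// continuation bit set (0b1010_1100) followed by 0b0000_0010.
+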
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
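+			// 2*tagsize accounts for the start-group and end-group tags
+			// that bracket the group's contents on the wire.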
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
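+	// Each map entry is marshaled as an embedded message with the key as
+	// field 1 and the value as field 2, hence the 1<<3 and 2<<3 above.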
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is a pointer to
+	// the value.
+	// The key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic, so we use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, there is no size cache,
+	// but it cannot be nested either, so just use valSizer.
+	valCachedSizer := valSizer
+	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)             // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr)          // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// A oneof field is an interface; we need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// It is unclear whether sorting is required here, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
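+//
+// For illustration: an item with type_id 100 and a 5-byte encoded message
+// is laid out as 0x0B (start group), 0x10 0x64 (type_id), 0x1A 0x05 plus
+// the five message bytes, and finally 0x0C (end group).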
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// In case the wrapper was somehow not generated.
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// In case the wrapper was somehow not generated.
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
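+
+// Typical usage (illustrative; pb.Foo stands for any generated message type):
+//
+//	data, err := proto.Marshal(&pb.Foo{})
+//	if err != nil {
+//		// handle the error
+//	}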
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, err := m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// In case the wrapper was somehow not generated.
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
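+
+// Doubling the capacity, rather than growing to exactly need, keeps the
+// amortized cost of repeated appends linear: for example, growing a full
+// 64-byte buffer by 10 bytes allocates capacity 128, not 74.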
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// merge merges src into dst, where dst and src are pointers to this
+	// field in the destination and source structs.
+	merge func(dst, src pointer)
+}
+
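+// Illustration (hypothetical generated structs, not library code): proto3
+// scalars are embedded directly in the struct, while proto2 scalars are
+// pointers, which is exactly what basicWidth and isPointer distinguish:
+//
+//	type Msg3 struct { // proto3
+//		Count int32 // basicWidth == 4: zero value can be skipped
+//		Done  bool  // basicWidth == 1
+//	}
+//
+//	type Msg2 struct { // proto2
+//		Count *int32 // isPointer == true: nil can be skipped
+//	}
+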
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int32{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("message field %s without pointer", tf))
+			case isSlice: // E.g., []*pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mi.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mi.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+		// Must be a oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..ebf1caa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2051 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around,
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
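+// Worked example (illustrative): every record starts with the varint key
+// tag<<3 | wiretype, which the code above splits back apart:
+//
+//	x := uint64(0x0a)  // field 1, wire type 2 (WireBytes)
+//	tag := x >> 3      // 1
+//	wire := int(x) & 7 // 2
+//
+// The one- and two-byte fast paths cover most keys: decodeVarint on the
+// bytes 0xac 0x02 takes the two-byte path, (0xac & 0x7f) + 0x02<<7 == 300.
+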
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+				panic("bad type for XXX_extensions field: " + f.Type.Name())
+			}
+			u.oldExtensions = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	if fn.IsValid() {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
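+// Worked example (illustrative): with two required fields, reqMask is
+// 1<<2 - 1 == 0b11. If only the first required field is seen during
+// unmarshal, the masks disagree and the clear bit names the culprit:
+//
+//	all := uint64(1)<<2 - 1 // 0b11
+//	seen := uint64(0b01)    // only required field #0 was present
+//	missing := all &^ seen  // 0b10: required field #1 is absent
+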
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
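+// Illustration (not library code): small tags land in the dense slice and
+// large, scattered tags fall back to the sparse map. For a message with
+// three struct fields (n == 3), tag 5 satisfies 5 < 16 and is stored at
+// u.dense[5], while tag 100 fails both 100 < 16 and 100 < 2*n and goes to
+// u.sparse[100]. The lookup in unmarshal mirrors this split:
+//
+//	if tag < uint64(len(u.dense)) {
+//		f = u.dense[tag]
+//	} else {
+//		f = u.sparse[tag]
+//	}
+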
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
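+// Worked example (illustrative): a packed repeated varint field arrives
+// as one WireBytes record: key, payload length, then back-to-back
+// varints. For field number 4 holding [3, 270, 86942]:
+//
+//	0x22           // key: 4<<3 | 2 (WireBytes)
+//	0x06           // payload length: 6 bytes
+//	0x03           // 3
+//	0x8e 0x02      // 270
+//	0x9e 0xa7 0x05 // 86942
+//
+// The packed branch above carves off those 6 bytes (res := b[x:]) and
+// decodes varints until the sub-slice is exhausted.
+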
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
+
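+// Worked example (illustrative): sint64 uses zigzag encoding so small
+// negatives stay small on the wire. int64(x)<<63>>63 arithmetic-shifts
+// the low bit into a 0 or -1 mask, and XORing it with x>>1 maps
+// x = 0, 1, 2, 3 back to v = 0, -1, 1, -2. A standalone sketch with a
+// hypothetical name:
+//
+//	func zigzagDecode64(x uint64) int64 {
+//		return int64(x>>1) ^ int64(x)<<63>>63
+//	}
+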
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
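+// Worked example (illustrative): fixed64 is eight little-endian bytes
+// assembled by the shifts above, so 0x2a 0x00 0x00 0x00 0x00 0x00 0x00
+// 0x00 decodes to 42, and decoding always consumes exactly eight bytes.
+// The same assembly could be written with the standard library:
+//
+//	v := binary.LittleEndian.Uint64(b[:8]) // encoding/binary
+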
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
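+
+// copyBytes is an illustrative sketch, not part of the generated
+// unmarshal path above: it shows the emptyBuf append trick in
+// isolation. The result is always non-nil, even for empty input, and
+// never aliases src.
+func copyBytes(src []byte) []byte {
+	return append(emptyBuf[:], src...)
+}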
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
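+
+// As an illustrative consequence of the merge semantics above: if the
+// same embedded-message field appears twice on the wire, the second
+// payload is unmarshaled into the message built from the first, so
+// scalar subfields from the second occurrence win while repeated
+// subfields accumulate.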
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
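+
+// Unlike embedded messages, groups carry no length prefix; they are
+// bracketed by matching tags. As an illustrative sketch of the wire
+// layout for field number 4 encoded as a group:
+//
+//	tag(4, WireStartGroup) ...nested fields... tag(4, WireEndGroup)
+//
+// findEndGroup below locates the unpaired EndGroup tag, honoring
+// nesting.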
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
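+
+// encodeMapEntry is an illustrative sketch, not used by the decoder
+// above: it builds the wire form that makeUnmarshalMap consumes. A map
+// entry is a length-delimited submessage whose field 1 holds the key
+// and field 2 holds the value; both are assumed to be varints here.
+func encodeMapEntry(key, val uint64) []byte {
+	var entry []byte
+	entry = append(entry, 1<<3|byte(WireVarint)) // field 1 tag (key)
+	entry = encodeVarint(entry, key)
+	entry = append(entry, 2<<3|byte(WireVarint)) // field 2 tag (value)
+	entry = encodeVarint(entry, val)
+	b := encodeVarint(nil, uint64(len(entry))) // length prefix
+	return append(b, entry...)
+}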
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// For example, given:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
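+
+// skipUnknown is an illustrative sketch of skipField's intended use,
+// not part of the decode tables: it drops one complete unknown field,
+// tag included, and returns the remaining buffer.
+func skipUnknown(b []byte) ([]byte, error) {
+	x, n := decodeVarint(b) // the tag's low three bits hold the wire type
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	return skipField(b[n:], int(x)&7)
+}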
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the index at which the EndGroup tag starts and
+// the index just past its end.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) <= 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
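+
+// varintRoundTrip is a minimal, illustrative sketch (not part of the
+// vendored API) of the contract between encodeVarint and the unrolled
+// decoder above: encoding then decoding any x consumes the whole
+// buffer and recovers x.
+func varintRoundTrip(x uint64) bool {
+	b := encodeVarint(nil, x)
+	y, n := decodeVarint(b)
+	return n == len(b) && y == x
+}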
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z./_],
+	// it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// isprint reports whether c is printable; equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
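+
+// quoteTextString is an illustrative helper, not part of the package
+// API: it runs writeString in isolation so the escaping is easy to
+// observe. The result includes the surrounding double quotes; a
+// newline comes out as the two characters \n, and a non-printable
+// byte such as 0x01 as the octal escape \001.
+func quoteTextString(s string) (string, error) {
+	var buf bytes.Buffer
+	bw := bufio.NewWriter(&buf)
+	if err := writeString(&textWriter{w: bw, compact: true}, s); err != nil {
+		return "", err
+	}
+	if err := bw.Flush(); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}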
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
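+
+// textRoundTrip is an illustrative sketch, not part of the package API:
+// it ties the marshaler above to the parser in text_parser.go by
+// rendering pb as text and parsing the result back into out.
+func textRoundTrip(pb, out Message) error {
+	return UnmarshalText(MarshalTextString(pb), out)
+}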
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// anyRepeatedlyUnpacked is the error format used when an Any message is
+// unpacked multiple times, or when one of its fields is already set.
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(i), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
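+
+// As an illustrative trace of unescape's contract: it receives the
+// input immediately after a backslash and returns the decoded bytes
+// plus the unconsumed tail, e.g.
+//
+//	unescape(`101abc`) -> "A", "abc"   (octal \101)
+//	unescape(`x41abc`) -> "A", "abc"   (hex \x41)
+//	unescape(`u00e9z`) -> "é", "z"     (code point \u00e9)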
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// next advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
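+
+// Note the C-style string concatenation performed above: adjacent
+// quoted strings separated only by whitespace form a single token, so
+// (as an illustrative example) the input
+//
+//	name: "ab" "cd"
+//
+// parses exactly as name: "abcd" would.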
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
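+
+// As an illustrative example of the rule above, a message-typed field
+// (a hypothetical field named inner here) may be written either way:
+//
+//	inner: < x: 1 >
+//	inner < x: 1 >
+//
+// whereas a proto3 string field always requires its colon.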
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for an
+			// instance where this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes an extension name or an expanded Any type URL
+// and the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("Expected ']' or ',' found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
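+
+// Repeated scalar fields accept both notations handled above; as an
+// illustrative example, for a hypothetical repeated int32 field ids,
+//
+//	ids: [1, 2, 3]
+//
+// and
+//
+//	ids: 1 ids: 2 ids: 3
+//
+// parse to the same three-element slice.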
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..8697a50
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,872 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
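+
+// For example, protoc can be asked to emit a serialized FileDescriptorSet for
+// a file and its imports (a sketch using standard protoc flags):
+//   protoc --descriptor_set_out=out.pb --include_imports foo.proto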
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+
+    optional ExtensionRangeOptions options = 3;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
+  repeated string reserved_name = 10;
+}
+
+message ExtensionRangeOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    // Tag-delimited aggregate.
+    // Group type is deprecated and not supported in proto3. However, Proto3
+    // implementations should still be able to parse the group wire format and
+    // treat group fields as unknown fields.
+    TYPE_GROUP          = 10;
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+
+  // Range of reserved numeric values. Reserved values may not be used by
+  // entries in the same enum. Reserved ranges may not overlap.
+  //
+  // Note that this is distinct from DescriptorProto.ReservedRange in that it
+  // is inclusive such that it can appropriately represent the entire int32
+  // domain.
+  message EnumReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Inclusive.
+  }
+
+  // Range of reserved numeric values. Reserved numeric values may not be used
+  // by enum values in the same enum declaration. Reserved ranges may not
+  // overlap.
+  repeated EnumReservedRange reserved_range = 4;
+
+  // Reserved enum value names, which may not be reused. A given name may only
+  // be reserved once.
+  repeated string reserved_name = 5;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies whether the client streams multiple client messages.
+  optional bool client_streaming = 5 [default=false];
+  // Identifies whether the server streams multiple server messages.
+  optional bool server_streaming = 6 [default=false];
+}
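+
+// For example (a sketch), a bidirectional-streaming method declared as
+//   rpc Chat(stream ChatMessage) returns (stream ChatMessage);
+// is described with both client_streaming and server_streaming set to true,
+// while a plain unary method leaves both at their default of false.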
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
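+//
+// For example, a custom message-level option can be declared and used like
+// this (a sketch; the option name and field number are illustrative only):
+//
+//   import "google/protobuf/descriptor.proto";
+//
+//   extend google.protobuf.MessageOptions {
+//     optional string my_option = 51234;
+//   }
+//
+//   message MyMessage {
+//     option (my_option) = "Hello world!";
+//   }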
+
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 42 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default Swift generators will take the proto package and CamelCase it
+  // replacing '.' with underscore and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+  // The parser stores options it doesn't recognize here.
+  // See the documentation for the "Options" section above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message.
+  // See the documentation for the "Options" section above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+  // is represented as a JavaScript string, which avoids the loss of precision
+  // that can happen when a large value is converted to a floating point JavaScript number.
+  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+  // use the JavaScript "number" type.  The behavior of the default option
+  // JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 4;  // removed jtype
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  reserved 5;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? HTTP based RPC implementation may choose GET verb for safe
+  // methods, and PUT verb for idempotent methods instead of the default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements are
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendent.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not a leading or trailing comment
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified offset. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
new file mode 100644
index 0000000..0d6055d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/doc.go
@@ -0,0 +1,51 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+	A plugin for the Google protocol buffer compiler to generate Go code.
+	Run it by building this program and putting it in your path with the name
+		protoc-gen-go
+	That word 'go' at the end becomes part of the option string set for the
+	protocol compiler, so once the protocol compiler (protoc) is installed
+	you can run
+		protoc --go_out=output_directory input_directory/file.proto
+	to generate Go bindings for the protocol defined by file.proto.
+	With that input, the output will be written to
+		output_directory/file.pb.go
+
+	The generated code is documented in the package comment for
+	the library.
+
+	See the README and documentation for protocol buffers to learn more:
+		https://developers.google.com/protocol-buffers/
+
+*/
+package documentation
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
new file mode 100644
index 0000000..c13a9f1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
@@ -0,0 +1,3086 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+	The code generator for the plugin for the Google protocol buffer compiler.
+	It generates Go code from the protocol buffer description files read by the
+	main routine.
+*/
+package generator
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"go/build"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"log"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
+
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// proto package is introduced; the generated code references
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 2
+
+// A Plugin provides functionality to add to the output during Go code generation,
+// such as to produce RPC stubs.
+type Plugin interface {
+	// Name identifies the plugin.
+	Name() string
+	// Init is called once after data structures are built but before
+	// code generation begins.
+	Init(g *Generator)
+	// Generate produces the code generated by the plugin for this file,
+	// except for the imports, by calling the generator's methods P, In, and Out.
+	Generate(file *FileDescriptor)
+	// GenerateImports produces the import declarations for this file.
+	// It is called after Generate.
+	GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+	plugins = append(plugins, p)
+}
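+
+// A typical registration (a sketch; myPlugin is a hypothetical type that
+// implements the Plugin interface above, registered from a package that
+// imports this one):
+//
+//	func init() {
+//		generator.RegisterPlugin(new(myPlugin))
+//	}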
+
+// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
+type GoImportPath string
+
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
+
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
+type GoPackageName string
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it.  These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name methods are common to messages and enums.
+type common struct {
+	file *FileDescriptor // File this object comes from.
+}
+
+// GoImportPath is the import path of the Go package containing the type.
+func (c *common) GoImportPath() GoImportPath {
+	return c.file.importPath
+}
+
+func (c *common) File() *FileDescriptor { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+	common
+	*descriptor.DescriptorProto
+	parent   *Descriptor            // The containing message, if any.
+	nested   []*Descriptor          // Inner messages, if any.
+	enums    []*EnumDescriptor      // Inner enums, if any.
+	ext      []*ExtensionDescriptor // Extensions, if any.
+	typename []string               // Cached typename vector.
+	index    int                    // The index into the container, whether the file or another message.
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
+	group    bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+	if d.typename != nil {
+		return d.typename
+	}
+	n := 0
+	for parent := d; parent != nil; parent = parent.parent {
+		n++
+	}
+	s := make([]string, n)
+	for parent := d; parent != nil; parent = parent.parent {
+		n--
+		s[n] = parent.GetName()
+	}
+	d.typename = s
+	return s
+}
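+
+// For example (a sketch): for a message Baz nested inside Bar, itself nested
+// inside the top-level message Foo, TypeName returns ["Foo", "Bar", "Baz"];
+// the two loops first count the ancestors and then fill the slice backwards.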
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+	common
+	*descriptor.EnumDescriptorProto
+	parent   *Descriptor // The containing message, if any.
+	typename []string    // Cached typename vector.
+	index    int         // The index into the container, whether the file or a message.
+	path     string      // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+	if e.typename != nil {
+		return e.typename
+	}
+	name := e.GetName()
+	if e.parent == nil {
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	e.typename = s
+	return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1..., not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+	if e.parent == nil {
+		// If the enum is not part of a message, the prefix is just the type name.
+		return CamelCase(*e.Name) + "_"
+	}
+	typeName := e.TypeName()
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+	for _, c := range e.Value {
+		if c.GetName() == name {
+			return fmt.Sprint(c.GetNumber())
+		}
+	}
+	log.Fatal("cannot find value for enum constant")
+	return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+	common
+	*descriptor.FieldDescriptorProto
+	parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+	name := e.GetName()
+	if e.parent == nil {
+		// top-level extension
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+	// The full type name.
+	typeName := e.TypeName()
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+	for i, s := range typeName {
+		typeName[i] = CamelCase(s)
+	}
+	return "E_" + strings.Join(typeName, "_")
+}
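+
+// For example (a sketch): an extension whose TypeName is
+// ["FooOptions", "bar_baz"] gets the descriptor variable name
+// "E_FooOptions_BarBaz".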
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+	common
+	o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+	*descriptor.FileDescriptorProto
+	desc []*Descriptor          // All the messages defined in this file.
+	enum []*EnumDescriptor      // All the enums defined in this file.
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
+
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
+	comments map[string]*descriptor.SourceCodeInfo_Location
+
+	// The full list of symbols that are exported,
+	// as a map from the exported object to its symbols.
+	// This is used for supporting public imports.
+	exported map[Object][]symbol
+
+	fingerprint string        // Fingerprint of this file's contents.
+	importPath  GoImportPath  // Import path of this file's package.
+	packageName GoPackageName // Name of this file's Go package.
+
+	proto3 bool // whether to generate proto3 code for this file
+}
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string {
+	name := strings.Map(badToUnderscore, baseName(d.GetName()))
+	return fmt.Sprintf("fileDescriptor_%s_%s", name, d.fingerprint)
+}
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
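+// For example (a sketch of the three cases):
+//   go_package = "github.com/example/foo;bar" -> ("github.com/example/foo", "bar", true)
+//   go_package = "example.com/foo"            -> ("example.com/foo", "foo", true)
+//   go_package = "foo"                        -> ("", "foo", true)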
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
+	opt := d.GetOptions().GetGoPackage()
+	if opt == "" {
+		return "", "", false
+	}
+	// A semicolon-delimited suffix delimits the import path and package name.
+	sc := strings.Index(opt, ";")
+	if sc >= 0 {
+		return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
+	}
+	// The presence of a slash implies there's an import path.
+	slash := strings.LastIndex(opt, "/")
+	if slash >= 0 {
+		return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
+	}
+	return "", cleanPackageName(opt), true
+}
+
+// goFileName returns the output name for the generated Go file.
+func (d *FileDescriptor) goFileName(pathType pathType) string {
+	name := *d.Name
+	if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
+		name = name[:len(name)-len(ext)]
+	}
+	name += ".pb.go"
+
+	if pathType == pathTypeSourceRelative {
+		return name
+	}
+
+	// Does the file have a "go_package" option?
+	// If it does, it may override the filename.
+	if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
+		// Replace the existing dirname with the declared import path.
+		_, name = path.Split(name)
+		name = path.Join(string(impPath), name)
+		return name
+	}
+
+	return name
+}
+
+func (d *FileDescriptor) addExport(obj Object, sym symbol) {
+	d.exported[obj] = append(d.exported[obj], sym)
+}
+
+// symbol is an interface representing an exported Go symbol.
+type symbol interface {
+	// GenerateAlias should generate an appropriate alias
+	// for the symbol from the named package.
+	GenerateAlias(g *Generator, pkg GoPackageName)
+}
+
+type messageSymbol struct {
+	sym                         string
+	hasExtensions, isMessageSet bool
+	oneofTypes                  []string
+}
+
+type getterSymbol struct {
+	name     string
+	typ      string
+	typeName string // canonical name in proto world; empty for proto.Message and similar
+	genType  bool   // whether typ contains a generated type (message/group/enum)
+}
+
+func (ms *messageSymbol) GenerateAlias(g *Generator, pkg GoPackageName) {
+	g.P("type ", ms.sym, " = ", pkg, ".", ms.sym)
+	for _, name := range ms.oneofTypes {
+		g.P("type ", name, " = ", pkg, ".", name)
+	}
+}
+
+type enumSymbol struct {
+	name   string
+	proto3 bool // Whether this came from a proto3 file.
+}
+
+func (es enumSymbol) GenerateAlias(g *Generator, pkg GoPackageName) {
+	s := es.name
+	g.P("type ", s, " = ", pkg, ".", s)
+	g.P("var ", s, "_name = ", pkg, ".", s, "_name")
+	g.P("var ", s, "_value = ", pkg, ".", s, "_value")
+}
+
+type constOrVarSymbol struct {
+	sym  string
+	typ  string // either "const" or "var"
+	cast string // if non-empty, a type cast is required (used for enums)
+}
+
+func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg GoPackageName) {
+	v := string(pkg) + "." + cs.sym
+	if cs.cast != "" {
+		v = cs.cast + "(" + v + ")"
+	}
+	g.P(cs.typ, " ", cs.sym, " = ", v)
+}
+
+// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
+type Object interface {
+	GoImportPath() GoImportPath
+	TypeName() []string
+	File() *FileDescriptor
+}
+
+// Generator is the type whose methods generate the output, stored in the associated response structure.
+type Generator struct {
+	*bytes.Buffer
+
+	Request  *plugin.CodeGeneratorRequest  // The input.
+	Response *plugin.CodeGeneratorResponse // The output.
+
+	Param             map[string]string // Command-line parameters.
+	PackageImportPath string            // Go import path of the package we're generating code for
+	ImportPrefix      string            // String to prefix to imported package file names.
+	ImportMap         map[string]string // Mapping from .proto file name to import path
+
+	Pkg map[string]string // The names under which we import support packages
+
+	outputImportPath GoImportPath                   // Package we're generating code for.
+	allFiles         []*FileDescriptor              // All files in the tree
+	allFilesByName   map[string]*FileDescriptor     // All files by filename.
+	genFiles         []*FileDescriptor              // Those files we will generate output for.
+	file             *FileDescriptor                // The file we are compiling now.
+	packageNames     map[GoImportPath]GoPackageName // Imported package names in the current file.
+	usedPackages     map[GoImportPath]bool          // Packages used in current file.
+	usedPackageNames map[GoPackageName]bool         // Package names used in the current file.
+	typeNameToObject map[string]Object              // Key is a fully-qualified name in input syntax.
+	init             []string                       // Lines to emit in the init function.
+	indent           string
+	pathType         pathType // How to generate output filenames.
+	writeOutput      bool
+	annotateCode     bool                                       // whether to store annotations
+	annotations      []*descriptor.GeneratedCodeInfo_Annotation // annotations to store
+}
+
+type pathType int
+
+const (
+	pathTypeImport pathType = iota
+	pathTypeSourceRelative
+)
+
+// New creates a new generator and allocates the request and response protobufs.
+func New() *Generator {
+	g := new(Generator)
+	g.Buffer = new(bytes.Buffer)
+	g.Request = new(plugin.CodeGeneratorRequest)
+	g.Response = new(plugin.CodeGeneratorResponse)
+	return g
+}
+
+// Error reports a problem, including an error, and exits the program.
+func (g *Generator) Error(err error, msgs ...string) {
+	s := strings.Join(msgs, " ") + ": " + err.Error()
+	log.Print("protoc-gen-go: error: ", s)
+	os.Exit(1)
+}
+
+// Fail reports a problem and exits the program.
+func (g *Generator) Fail(msgs ...string) {
+	s := strings.Join(msgs, " ")
+	log.Print("protoc-gen-go: error: ", s)
+	os.Exit(1)
+}
+
+// CommandLineParameters breaks the comma-separated list of key=value pairs
+// in the parameter (a member of the request protobuf) into a key/value map.
+// It then sets file name mappings defined by those entries.
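+// For example (a sketch, assuming a plugin named "grpc" has been registered),
+// the parameter string
+//   plugins=grpc,paths=source_relative,Mfoo/bar.proto=example.com/foo/bar
+// enables only the grpc plugin, selects source-relative output file names,
+// and maps foo/bar.proto to the import path example.com/foo/bar.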
+func (g *Generator) CommandLineParameters(parameter string) {
+	g.Param = make(map[string]string)
+	for _, p := range strings.Split(parameter, ",") {
+		if i := strings.Index(p, "="); i < 0 {
+			g.Param[p] = ""
+		} else {
+			g.Param[p[0:i]] = p[i+1:]
+		}
+	}
+
+	g.ImportMap = make(map[string]string)
+	pluginList := "none" // Default list of plugin names to enable (empty means all).
+	for k, v := range g.Param {
+		switch k {
+		case "import_prefix":
+			g.ImportPrefix = v
+		case "import_path":
+			g.PackageImportPath = v
+		case "paths":
+			switch v {
+			case "import":
+				g.pathType = pathTypeImport
+			case "source_relative":
+				g.pathType = pathTypeSourceRelative
+			default:
+				g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v))
+			}
+		case "plugins":
+			pluginList = v
+		case "annotate_code":
+			if v == "true" {
+				g.annotateCode = true
+			}
+		default:
+			if len(k) > 0 && k[0] == 'M' {
+				g.ImportMap[k[1:]] = v
+			}
+		}
+	}
+	if pluginList != "" {
+		// Amend the set of plugins.
+		enabled := make(map[string]bool)
+		for _, name := range strings.Split(pluginList, "+") {
+			enabled[name] = true
+		}
+		var nplugins []Plugin
+		for _, p := range plugins {
+			if enabled[p.Name()] {
+				nplugins = append(nplugins, p)
+			}
+		}
+		plugins = nplugins
+	}
+}
+
+// DefaultPackageName returns the package name printed for the object.
+// If its file is in a different package, it returns the package name we're using for this file, plus ".".
+// Otherwise it returns the empty string.
+func (g *Generator) DefaultPackageName(obj Object) string {
+	importPath := obj.GoImportPath()
+	if importPath == g.outputImportPath {
+		return ""
+	}
+	return string(g.GoPackageName(importPath)) + "."
+}
+
+// GoPackageName returns the name used for a package.
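+// Distinct import paths that share a base name get numeric suffixes; e.g.
+// (a hypothetical sketch) "example.com/a/util" may map to "util" and a
+// later "example.com/b/util" to "util1".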
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName {
+	if name, ok := g.packageNames[importPath]; ok {
+		return name
+	}
+	name := cleanPackageName(baseName(string(importPath)))
+	for i, orig := 1, name; g.usedPackageNames[name]; i++ {
+		name = orig + GoPackageName(strconv.Itoa(i))
+	}
+	g.packageNames[importPath] = name
+	g.usedPackageNames[name] = true
+	return name
+}
+
+var globalPackageNames = map[GoPackageName]bool{
+	"fmt":   true,
+	"math":  true,
+	"proto": true,
+}
+
+// Create and remember a guaranteed unique package name. Pkg is the candidate name.
+// The FileDescriptor parameter is unused.
+func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
+	name := cleanPackageName(pkg)
+	for i, orig := 1, name; globalPackageNames[name]; i++ {
+		name = orig + GoPackageName(strconv.Itoa(i))
+	}
+	globalPackageNames[name] = true
+	return string(name)
+}
+
+var isGoKeyword = map[string]bool{
+	"break":       true,
+	"case":        true,
+	"chan":        true,
+	"const":       true,
+	"continue":    true,
+	"default":     true,
+	"else":        true,
+	"defer":       true,
+	"fallthrough": true,
+	"for":         true,
+	"func":        true,
+	"go":          true,
+	"goto":        true,
+	"if":          true,
+	"import":      true,
+	"interface":   true,
+	"map":         true,
+	"package":     true,
+	"range":       true,
+	"return":      true,
+	"select":      true,
+	"struct":      true,
+	"switch":      true,
+	"type":        true,
+	"var":         true,
+}
+
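+// cleanPackageName maps an arbitrary candidate string to a valid Go
+// identifier. For example (hypothetical inputs), "foo-bar" becomes
+// "foo_bar", "type" becomes "_type", and "2fa" becomes "_2fa".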
+func cleanPackageName(name string) GoPackageName {
+	name = strings.Map(badToUnderscore, name)
+	// Identifier must not be keyword: insert _.
+	if isGoKeyword[name] {
+		name = "_" + name
+	}
+	// Identifier must not begin with digit: insert _.
+	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
+		name = "_" + name
+	}
+	return GoPackageName(name)
+}
+
+// defaultGoPackage returns the package name to use,
+// derived from the import path of the package we're building code for.
+func (g *Generator) defaultGoPackage() GoPackageName {
+	p := g.PackageImportPath
+	if i := strings.LastIndex(p, "/"); i >= 0 {
+		p = p[i+1:]
+	}
+	return cleanPackageName(p)
+}
+
+// SetPackageNames sets the package name for this run.
+// The package name must agree across all files being generated.
+// It also defines unique package names for all imported files.
+func (g *Generator) SetPackageNames() {
+	g.outputImportPath = g.genFiles[0].importPath
+
+	defaultPackageNames := make(map[GoImportPath]GoPackageName)
+	for _, f := range g.genFiles {
+		if _, p, ok := f.goPackageOption(); ok {
+			defaultPackageNames[f.importPath] = p
+		}
+	}
+	for _, f := range g.genFiles {
+		if _, p, ok := f.goPackageOption(); ok {
+			// Source file: option go_package = "quux/bar";
+			f.packageName = p
+		} else if p, ok := defaultPackageNames[f.importPath]; ok {
+			// A go_package option in another file in the same package.
+			//
+			// This is a poor choice in general, since every source file should
+			// contain a go_package option. Supported mainly for historical
+			// compatibility.
+			f.packageName = p
+		} else if p := g.defaultGoPackage(); p != "" {
+			// Command-line: import_path=quux/bar.
+			//
+			// The import_path flag sets a package name for files which don't
+			// contain a go_package option.
+			f.packageName = p
+		} else if p := f.GetPackage(); p != "" {
+			// Source file: package quux.bar;
+			f.packageName = cleanPackageName(p)
+		} else {
+			// Source filename.
+			f.packageName = cleanPackageName(baseName(f.GetName()))
+		}
+	}
+
+	// Check that all files have a consistent package name and import path.
+	for _, f := range g.genFiles[1:] {
+		if a, b := g.genFiles[0].importPath, f.importPath; a != b {
+			g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b))
+		}
+		if a, b := g.genFiles[0].packageName, f.packageName; a != b {
+			g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b))
+		}
+	}
+
+	// Names of support packages. These never vary (if there are conflicts,
+	// we rename the conflicting package), so this could be removed someday.
+	g.Pkg = map[string]string{
+		"fmt":   "fmt",
+		"math":  "math",
+		"proto": "proto",
+	}
+}
+
+// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
+// and FileDescriptorProtos into file-referenced objects within the Generator.
+// It also creates the list of files to generate and so should be called before GenerateAllFiles.
+func (g *Generator) WrapTypes() {
+	g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
+	g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
+	genFileNames := make(map[string]bool)
+	for _, n := range g.Request.FileToGenerate {
+		genFileNames[n] = true
+	}
+	for _, f := range g.Request.ProtoFile {
+		fd := &FileDescriptor{
+			FileDescriptorProto: f,
+			exported:            make(map[Object][]symbol),
+			proto3:              fileIsProto3(f),
+		}
+		// The import path may be set in a number of ways.
+		if substitution, ok := g.ImportMap[f.GetName()]; ok {
+			// Command-line: M=foo.proto=quux/bar.
+			//
+			// Explicit mapping of source file to import path.
+			fd.importPath = GoImportPath(substitution)
+		} else if genFileNames[f.GetName()] && g.PackageImportPath != "" {
+			// Command-line: import_path=quux/bar.
+			//
+			// The import_path flag sets the import path for every file that
+			// we generate code for.
+			fd.importPath = GoImportPath(g.PackageImportPath)
+		} else if p, _, _ := fd.goPackageOption(); p != "" {
+			// Source file: option go_package = "quux/bar";
+			//
+			// The go_package option sets the import path. Most users should use this.
+			fd.importPath = p
+		} else {
+			// Source filename.
+			//
+			// Last resort when nothing else is available.
+			fd.importPath = GoImportPath(path.Dir(f.GetName()))
+		}
+		// We must wrap the descriptors before we wrap the enums
+		fd.desc = wrapDescriptors(fd)
+		g.buildNestedDescriptors(fd.desc)
+		fd.enum = wrapEnumDescriptors(fd, fd.desc)
+		g.buildNestedEnums(fd.desc, fd.enum)
+		fd.ext = wrapExtensions(fd)
+		extractComments(fd)
+		g.allFiles = append(g.allFiles, fd)
+		g.allFilesByName[f.GetName()] = fd
+	}
+	for _, fd := range g.allFiles {
+		fd.imp = wrapImported(fd, g)
+	}
+
+	g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
+	for _, fileName := range g.Request.FileToGenerate {
+		fd := g.allFilesByName[fileName]
+		if fd == nil {
+			g.Fail("could not find file named", fileName)
+		}
+		fingerprint, err := fingerprintProto(fd.FileDescriptorProto)
+		if err != nil {
+			g.Error(err)
+		}
+		fd.fingerprint = fingerprint
+		g.genFiles = append(g.genFiles, fd)
+	}
+}
+
+// fingerprintProto returns a fingerprint for a message.
+// The fingerprint is intended to prevent conflicts between generated files,
+// not to provide cryptographic security.
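+// The result is the hex encoding of the first 8 bytes of the SHA-256 of the
+// serialized message, i.e. a fixed 16-character string.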
+func fingerprintProto(m proto.Message) (string, error) {
+	b, err := proto.Marshal(m)
+	if err != nil {
+		return "", err
+	}
+	h := sha256.Sum256(b)
+	return hex.EncodeToString(h[:8]), nil
+}
+
+// Scan the descriptors in this file.  For each one, build the slice of nested descriptors
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
+	for _, desc := range descs {
+		if len(desc.NestedType) != 0 {
+			for _, nest := range descs {
+				if nest.parent == desc {
+					desc.nested = append(desc.nested, nest)
+				}
+			}
+			if len(desc.nested) != len(desc.NestedType) {
+				g.Fail("internal error: nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
+	for _, desc := range descs {
+		if len(desc.EnumType) != 0 {
+			for _, enum := range enums {
+				if enum.parent == desc {
+					desc.enums = append(desc.enums, enum)
+				}
+			}
+			if len(desc.enums) != len(desc.EnumType) {
+				g.Fail("internal error: enum nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+// Construct the Descriptor
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
+	d := &Descriptor{
+		common:          common{file},
+		DescriptorProto: desc,
+		parent:          parent,
+		index:           index,
+	}
+	if parent == nil {
+		d.path = fmt.Sprintf("%d,%d", messagePath, index)
+	} else {
+		d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
+	}
+
+	// The only way to distinguish a group from a message is whether
+	// the containing message has a TYPE_GROUP field that matches.
+	if parent != nil {
+		parts := d.TypeName()
+		if file.Package != nil {
+			parts = append([]string{*file.Package}, parts...)
+		}
+		exp := "." + strings.Join(parts, ".")
+		for _, field := range parent.Field {
+			if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+				d.group = true
+				break
+			}
+		}
+	}
+
+	for _, field := range desc.Extension {
+		d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+	}
+
+	return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *FileDescriptor) []*Descriptor {
+	sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+	for i, desc := range file.MessageType {
+		sl = wrapThisDescriptor(sl, desc, nil, file, i)
+	}
+	return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
+	sl = append(sl, newDescriptor(desc, parent, file, index))
+	me := sl[len(sl)-1]
+	for i, nested := range desc.NestedType {
+		sl = wrapThisDescriptor(sl, nested, me, file, i)
+	}
+	return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
+	ed := &EnumDescriptor{
+		common:              common{file},
+		EnumDescriptorProto: desc,
+		parent:              parent,
+		index:               index,
+	}
+	if parent == nil {
+		ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+	} else {
+		ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+	}
+	return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
+	sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+	// Top-level enums.
+	for i, enum := range file.EnumType {
+		sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+	}
+	// Enums within messages. Enums within embedded messages appear in the outer-most message.
+	for _, nested := range descs {
+		for i, enum := range nested.EnumType {
+			sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+		}
+	}
+	return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
+	var sl []*ExtensionDescriptor
+	for _, field := range file.Extension {
+		sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+	}
+	return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
+	for _, index := range file.PublicDependency {
+		df := g.fileByName(file.Dependency[index])
+		for _, d := range df.desc {
+			if d.GetOptions().GetMapEntry() {
+				continue
+			}
+			sl = append(sl, &ImportedDescriptor{common{file}, d})
+		}
+		for _, e := range df.enum {
+			sl = append(sl, &ImportedDescriptor{common{file}, e})
+		}
+		for _, ext := range df.ext {
+			sl = append(sl, &ImportedDescriptor{common{file}, ext})
+		}
+	}
+	return
+}
+
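+// extractComments indexes the file's leading comments by source path, keyed
+// by the comma-joined path (e.g. "4,0,2,1" for the second field of the first
+// message in the file).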
+func extractComments(file *FileDescriptor) {
+	file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+	for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+		if loc.LeadingComments == nil {
+			continue
+		}
+		var p []string
+		for _, n := range loc.Path {
+			p = append(p, strconv.Itoa(int(n)))
+		}
+		file.comments[strings.Join(p, ",")] = loc
+	}
+}
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+	g.typeNameToObject = make(map[string]Object)
+	for _, f := range g.allFiles {
+		// The names in this loop are defined by the proto world, not us, so the
+		// package name may be empty.  If so, the dotted package name of X will
+		// be ".X"; otherwise it will be ".pkg.X".
+		dottedPkg := "." + f.GetPackage()
+		if dottedPkg != "." {
+			dottedPkg += "."
+		}
+		for _, enum := range f.enum {
+			name := dottedPkg + dottedSlice(enum.TypeName())
+			g.typeNameToObject[name] = enum
+		}
+		for _, desc := range f.desc {
+			name := dottedPkg + dottedSlice(desc.TypeName())
+			g.typeNameToObject[name] = desc
+		}
+	}
+}
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+	o, ok := g.typeNameToObject[typeName]
+	if !ok {
+		g.Fail("can't find object with type", typeName)
+	}
+
+	// If the file of this object isn't a direct dependency of the current file,
+	// or in the current file, then this object has been publicly imported into
+	// a dependency of the current file.
+	// We should return the ImportedDescriptor object for it instead.
+	direct := *o.File().Name == *g.file.Name
+	if !direct {
+		for _, dep := range g.file.Dependency {
+			if *g.fileByName(dep).Name == *o.File().Name {
+				direct = true
+				break
+			}
+		}
+	}
+	if !direct {
+		found := false
+	Loop:
+		for _, dep := range g.file.Dependency {
+			df := g.fileByName(*g.fileByName(dep).Name)
+			for _, td := range df.imp {
+				if td.o == o {
+					// Found it!
+					o = td
+					found = true
+					break Loop
+				}
+			}
+		}
+		if !found {
+			log.Printf("protoc-gen-go: WARNING: failed finding publicly imported dependency for %v, used in %v", typeName, *g.file.Name)
+		}
+	}
+
+	return o
+}
+
+// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
+type AnnotatedAtoms struct {
+	source string
+	path   string
+	atoms  []interface{}
+}
+
+// Annotate records the file name and proto AST path of a list of atoms
+// so that a later call to P can emit a link from each atom to its origin.
+func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
+	return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
+}
+
+// printAtom prints the (atomic, non-annotation) argument to the generated output.
+func (g *Generator) printAtom(v interface{}) {
+	switch v := v.(type) {
+	case string:
+		g.WriteString(v)
+	case *string:
+		g.WriteString(*v)
+	case bool:
+		fmt.Fprint(g, v)
+	case *bool:
+		fmt.Fprint(g, *v)
+	case int:
+		fmt.Fprint(g, v)
+	case *int32:
+		fmt.Fprint(g, *v)
+	case *int64:
+		fmt.Fprint(g, *v)
+	case float64:
+		fmt.Fprint(g, v)
+	case *float64:
+		fmt.Fprint(g, *v)
+	case GoPackageName:
+		g.WriteString(string(v))
+	case GoImportPath:
+		g.WriteString(strconv.Quote(string(v)))
+	default:
+		g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+	}
+}
+
+// P prints the arguments to the generated output.  It handles strings and int32s, plus
+// handling indirections because they may be *string, etc.  Any inputs of type AnnotatedAtoms may emit
+// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode
+// is true).
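+//
+// A hypothetical call, pairing an annotated atom with plain ones:
+//
+//	g.P("type ", Annotate(file, "4,0", "Foo"), " struct {")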
+func (g *Generator) P(str ...interface{}) {
+	if !g.writeOutput {
+		return
+	}
+	g.WriteString(g.indent)
+	for _, v := range str {
+		switch v := v.(type) {
+		case *AnnotatedAtoms:
+			begin := int32(g.Len())
+			for _, v := range v.atoms {
+				g.printAtom(v)
+			}
+			if g.annotateCode {
+				end := int32(g.Len())
+				var path []int32
+				for _, token := range strings.Split(v.path, ",") {
+					val, err := strconv.ParseInt(token, 10, 32)
+					if err != nil {
+						g.Fail("could not parse proto AST path: ", err.Error())
+					}
+					path = append(path, int32(val))
+				}
+				g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
+					Path:       path,
+					SourceFile: &v.source,
+					Begin:      &begin,
+					End:        &end,
+				})
+			}
+		default:
+			g.printAtom(v)
+		}
+	}
+	g.WriteByte('\n')
+}
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output. The generator runs for every file, even the files
+	// that we don't generate output for, so that we can collate the full list
+	// of exported symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.annotations = nil
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		fname := file.goFileName(g.pathType)
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(fname),
+			Content: proto.String(g.String()),
+		})
+		if g.annotateCode {
+			// Store the generated code annotations in text, as the protoc plugin protocol requires that
+			// strings contain valid UTF-8.
+			g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+				Name:    proto.String(file.goFileName(g.pathType) + ".meta"),
+				Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+			})
+		}
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.file = file
+	g.usedPackages = make(map[GoImportPath]bool)
+	g.packageNames = make(map[GoImportPath]GoPackageName)
+	g.usedPackageNames = make(map[GoPackageName]bool)
+	for name := range globalPackageNames {
+		g.usedPackageNames[name] = true
+	}
+
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the proto package it is being compiled against.")
+	g.P("// A compilation error at this line likely means your copy of the")
+	g.P("// proto package needs to be updated.")
+	g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+	g.P()
+
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
+		if desc.GetOptions().GetMapEntry() {
+			continue
+		}
+		g.generateMessage(desc)
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtension(ext)
+	}
+	g.generateInitFunction()
+
+	// Run the plugins before the imports so we know which imports are necessary.
+	g.runPlugins(file)
+
+	g.generateFileDescriptor(file)
+
+	// Generate header and imports last, though they appear first in the output.
+	rem := g.Buffer
+	remAnno := g.annotations
+	g.Buffer = new(bytes.Buffer)
+	g.annotations = nil
+	g.generateHeader()
+	g.generateImports()
+	if !g.writeOutput {
+		return
+	}
+	// Adjust the offsets for annotations displaced by the header and imports.
+	for _, anno := range remAnno {
+		*anno.Begin += int32(g.Len())
+		*anno.End += int32(g.Len())
+		g.annotations = append(g.annotations, anno)
+	}
+	g.Write(rem.Bytes())
+
+	// Reformat generated code and patch annotation locations.
+	fset := token.NewFileSet()
+	original := g.Bytes()
+	if g.annotateCode {
+		// make a copy independent of g; we'll need it after Reset.
+		original = append([]byte(nil), original...)
+	}
+	ast, err := parser.ParseFile(fset, "", original, parser.ParseComments)
+	if err != nil {
+		// Print out the bad code with line numbers.
+		// This should never happen in practice, but it can while changing generated code,
+		// so consider this a debugging aid.
+		var src bytes.Buffer
+		s := bufio.NewScanner(bytes.NewReader(original))
+		for line := 1; s.Scan(); line++ {
+			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+		}
+		g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+	}
+	g.Reset()
+	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, ast)
+	if err != nil {
+		g.Fail("generated Go source code could not be reformatted:", err.Error())
+	}
+	if g.annotateCode {
+		m, err := remap.Compute(original, g.Bytes())
+		if err != nil {
+			g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
+		}
+		for _, anno := range g.annotations {
+			new, ok := m.Find(int(*anno.Begin), int(*anno.End))
+			if !ok {
+				g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
+			}
+			*anno.Begin = int32(new.Pos)
+			*anno.End = int32(new.End)
+		}
+	}
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+	g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+	if g.file.GetOptions().GetDeprecated() {
+		g.P("// ", g.file.Name, " is a deprecated file.")
+	} else {
+		g.P("// source: ", g.file.Name)
+	}
+	g.P()
+
+	importPath, _, _ := g.file.goPackageOption()
+	if importPath == "" {
+		g.P("package ", g.file.packageName)
+	} else {
+		g.P("package ", g.file.packageName, " // import ", GoImportPath(g.ImportPrefix)+importPath)
+	}
+	g.P()
+
+	if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok {
+		g.P("/*")
+		// not using g.PrintComments because this is a /* */ comment block.
+		text := strings.TrimSuffix(loc.GetLeadingComments(), "\n")
+		for _, line := range strings.Split(text, "\n") {
+			line = strings.TrimPrefix(line, " ")
+			// ensure we don't escape from the block comment
+			line = strings.Replace(line, "*/", "* /", -1)
+			g.P(line)
+		}
+		g.P("*/")
+		g.P()
+	}
+}
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+	if !g.writeOutput {
+		return false
+	}
+	if c, ok := g.makeComments(path); ok {
+		g.P(c)
+		return true
+	}
+	return false
+}
+
+// makeComments generates the comment string for the field, with no trailing "\n".
+func (g *Generator) makeComments(path string) (string, bool) {
+	loc, ok := g.file.comments[path]
+	if !ok {
+		return "", false
+	}
+	w := new(bytes.Buffer)
+	nl := ""
+	for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
+		fmt.Fprintf(w, "%s// %s", nl, strings.TrimPrefix(line, " "))
+		nl = "\n"
+	}
+	return w.String(), true
+}
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+	return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
+func (g *Generator) weak(i int32) bool {
+	for _, j := range g.file.WeakDependency {
+		if j == i {
+			return true
+		}
+	}
+	return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+	// We almost always need a proto import.  Rather than computing when we
+	// do, which is tricky when there's a plugin, just import it and
+	// reference it later. The same argument applies to the fmt and math packages.
+	g.P("import "+g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
+	g.P("import " + g.Pkg["fmt"] + ` "fmt"`)
+	g.P("import " + g.Pkg["math"] + ` "math"`)
+	var (
+		imports       = make(map[GoImportPath]bool)
+		strongImports = make(map[GoImportPath]bool)
+		importPaths   []string
+	)
+	for i, s := range g.file.Dependency {
+		fd := g.fileByName(s)
+		importPath := fd.importPath
+		// Do not import our own package.
+		if importPath == g.file.importPath {
+			continue
+		}
+		if !imports[importPath] {
+			importPaths = append(importPaths, string(importPath))
+		}
+		imports[importPath] = true
+		if !g.weak(int32(i)) {
+			strongImports[importPath] = true
+		}
+	}
+	sort.Strings(importPaths)
+	for i := range importPaths {
+		importPath := GoImportPath(importPaths[i])
+		packageName := g.GoPackageName(importPath)
+		fullPath := GoImportPath(g.ImportPrefix) + importPath
+		// Skip weak imports.
+		if !strongImports[importPath] {
+			g.P("// skipping weak import ", packageName, " ", fullPath)
+			continue
+		}
+		// We need to import all the dependencies, even if we don't reference them,
+		// because other code and tools depend on having the full transitive closure
+		// of protocol buffer types in the binary.
+		if _, ok := g.usedPackages[importPath]; !ok {
+			packageName = "_"
+		}
+		g.P("import ", packageName, " ", fullPath)
+	}
+	g.P()
+	// TODO: may need to worry about uniqueness across plugins
+	for _, p := range plugins {
+		p.GenerateImports(g.file)
+		g.P()
+	}
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
+	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
+	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
+	g.P("var _ = ", g.Pkg["math"], ".Inf")
+	g.P()
+}
+
+func (g *Generator) generateImported(id *ImportedDescriptor) {
+	tn := id.TypeName()
+	sn := tn[len(tn)-1]
+	df := id.o.File()
+	filename := *df.Name
+	if df.importPath == g.file.importPath {
+		// Don't generate type aliases for files in the same Go package as this one.
+		g.P("// Ignoring public import of ", sn, " from ", filename)
+		g.P()
+		return
+	}
+	if !supportTypeAliases {
+		g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
+	}
+	g.P("// ", sn, " from public import ", filename)
+	g.usedPackages[df.importPath] = true
+
+	for _, sym := range df.exported[id.o] {
+		sym.GenerateAlias(g, g.GoPackageName(df.importPath))
+	}
+
+	g.P()
+}
+
+// Generate the enum definitions for this EnumDescriptor.
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	ccPrefix := enum.prefix()
+
+	deprecatedEnum := ""
+	if enum.GetOptions().GetDeprecated() {
+		deprecatedEnum = deprecationComment
+	}
+	g.PrintComments(enum.path)
+	g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
+	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+	g.P("const (")
+	for i, e := range enum.Value {
+		etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
+		g.PrintComments(etorPath)
+
+		deprecatedValue := ""
+		if e.GetOptions().GetDeprecated() {
+			deprecatedValue = deprecationComment
+		}
+
+		name := ccPrefix + *e.Name
+		g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
+		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+	}
+	g.P(")")
+	g.P("var ", ccTypeName, "_name = map[int32]string{")
+	generated := make(map[int32]bool) // avoid duplicate values
+	for _, e := range enum.Value {
+		duplicate := ""
+		if _, present := generated[*e.Number]; present {
+			duplicate = "// Duplicate value: "
+		}
+		g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
+		generated[*e.Number] = true
+	}
+	g.P("}")
+	g.P("var ", ccTypeName, "_value = map[string]int32{")
+	for _, e := range enum.Value {
+		g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
+	}
+	g.P("}")
+
+	if !enum.proto3() {
+		g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
+		g.P("p := new(", ccTypeName, ")")
+		g.P("*p = x")
+		g.P("return p")
+		g.P("}")
+	}
+
+	g.P("func (x ", ccTypeName, ") String() string {")
+	g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
+	g.P("}")
+
+	if !enum.proto3() {
+		g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
+		g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
+		g.P("if err != nil {")
+		g.P("return err")
+		g.P("}")
+		g.P("*x = ", ccTypeName, "(value)")
+		g.P("return nil")
+		g.P("}")
+	}
+
+	var indexes []string
+	for m := enum.parent; m != nil; m = m.parent {
+		// XXX: skip groups?
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	indexes = append(indexes, strconv.Itoa(enum.index))
+	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+	}
+
+	g.P()
+}
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code.  The fields are:
+//	wire encoding
+//	protocol tag number
+//	opt,req,rep for optional, required, or repeated
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
+//	name= the original declared name
+//	enum= the name of the enum type if it is an enum-typed field.
+//	proto3 if this field is in a proto3 message
+//	def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
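+//
+// For example, a hypothetical optional proto2 string field numbered 4 with a
+// distinct JSON name might yield the tag:
+//
+//	"bytes,4,opt,name=field_name,json=fieldName"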
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+	optrepreq := ""
+	switch {
+	case isOptional(field):
+		optrepreq = "opt"
+	case isRequired(field):
+		optrepreq = "req"
+	case isRepeated(field):
+		optrepreq = "rep"
+	}
+	var defaultValue string
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
+		defaultValue = *dv
+		// Some types need tweaking.
+		switch *field.Type {
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
+			if defaultValue == "true" {
+				defaultValue = "1"
+			} else {
+				defaultValue = "0"
+			}
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
+			// Nothing to do. Quoting is done for the whole tag.
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// For enums we need to provide the integer constant.
+			obj := g.ObjectNamed(field.GetTypeName())
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// It is an enum that was publicly imported.
+				// We need the underlying type.
+				obj = id.o
+			}
+			enum, ok := obj.(*EnumDescriptor)
+			if !ok {
+				log.Printf("obj is a %T", obj)
+				if id, ok := obj.(*ImportedDescriptor); ok {
+					log.Printf("id.o is a %T", id.o)
+				}
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+			}
+			defaultValue = enum.integerValueAsString(defaultValue)
+		}
+		defaultValue = ",def=" + defaultValue
+	}
+	enum := ""
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+		// We avoid using obj.GoPackageName(), because we want to use the
+		// original (proto-world) package name.
+		obj := g.ObjectNamed(field.GetTypeName())
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			obj = id.o
+		}
+		enum = ",enum="
+		if pkg := obj.File().GetPackage(); pkg != "" {
+			enum += pkg + "."
+		}
+		enum += CamelCaseSlice(obj.TypeName())
+	}
+	packed := ""
+	if (field.Options != nil && field.Options.GetPacked()) ||
+		// Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
+		// "In proto3, repeated fields of scalar numeric types use packed encoding by default."
+		(message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
+			isRepeated(field) && isScalar(field)) {
+		packed = ",packed"
+	}
+	fieldName := field.GetName()
+	name := fieldName
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		// We must use the type name for groups instead of
+		// the field name to preserve capitalization.
+		// type_name in FieldDescriptorProto is fully-qualified,
+		// but we only want the local part.
+		name = *field.TypeName
+		if i := strings.LastIndex(name, "."); i >= 0 {
+			name = name[i+1:]
+		}
+	}
+	if json := field.GetJsonName(); json != "" && json != name {
+		// TODO: escaping might be needed, in which case
+		// perhaps this should be in its own "json" tag.
+		name += ",json=" + json
+	}
+	name = ",name=" + name
+	if message.proto3() {
+		name += ",proto3"
+	}
+	oneof := ""
+	if field.OneofIndex != nil {
+		oneof = ",oneof"
+	}
+	return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
+		wiretype,
+		field.GetNumber(),
+		optrepreq,
+		packed,
+		name,
+		enum,
+		oneof,
+		defaultValue))
+}
+
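+// needsStar reports whether a singular proto2 field of the given type needs
+// a pointer in the generated Go struct; groups and messages already carry a
+// star from GoType, and []byte is already nilable.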
+func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
+	switch typ {
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return false
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return false
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return false
+	}
+	return true
+}
+
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
+// TypeName drops the package name and underscores the rest.
+// Otherwise the object is from another package, and the result is the underscored
+// package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
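+// For example (hypothetical fields): a proto2 optional int32 yields
+// ("*int32", "varint"), and a repeated message yields ("[]*Foo", "bytes").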
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+	// TODO: Options.
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		typ, wire = "float64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		typ, wire = "float32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		typ, wire = "int64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		typ, wire = "uint64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		typ, wire = "int32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		typ, wire = "uint32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		typ, wire = "uint64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		typ, wire = "uint32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		typ, wire = "bool", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		typ, wire = "string", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "group"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		typ, wire = "[]byte", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "varint"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		typ, wire = "int32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		typ, wire = "int64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		typ, wire = "int32", "zigzag32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		typ, wire = "int64", "zigzag64"
+	default:
+		g.Fail("unknown type for", field.GetName())
+	}
+	if isRepeated(field) {
+		typ = "[]" + typ
+	} else if message != nil && message.proto3() {
+		return
+	} else if field.OneofIndex != nil && message != nil {
+		return
+	} else if needsStar(*field.Type) {
+		typ = "*" + typ
+	}
+	return
+}
+
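+// RecordTypeUse marks the Go package that provides the named proto type as
+// used, so that generateImports emits a non-blank import for it.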
+func (g *Generator) RecordTypeUse(t string) {
+	if _, ok := g.typeNameToObject[t]; ok {
+		// Call ObjectNamed to get the true object to record the use.
+		obj := g.ObjectNamed(t)
+		g.usedPackages[obj.GoImportPath()] = true
+	}
+}
+
+// Method names that may be generated.  Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+	"Reset",
+	"String",
+	"ProtoMessage",
+	"Marshal",
+	"Unmarshal",
+	"ExtensionRangeArray",
+	"ExtensionMap",
+	"Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+	"Any":       true,
+	"Duration":  true,
+	"Empty":     true,
+	"Struct":    true,
+	"Timestamp": true,
+
+	"Value":       true,
+	"ListValue":   true,
+	"DoubleValue": true,
+	"FloatValue":  true,
+	"Int64Value":  true,
+	"UInt64Value": true,
+	"Int32Value":  true,
+	"UInt32Value": true,
+	"BoolValue":   true,
+	"StringValue": true,
+	"BytesValue":  true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it's a built-in default or explicit in the source. Returns e.g. "nil", `""`, or "Default_MessageType_FieldName".
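+//
+// For example (hypothetical fields): a repeated field yields "nil", a plain
+// proto3 string yields `""`, and a proto2 string with an explicit default
+// yields its Default_MessageType_FieldName constant.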
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+	if isRepeated(field) {
+		return "nil"
+	}
+	if def := field.GetDefaultValue(); def != "" {
+		defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+			return defaultConstant
+		}
+		return "append([]byte(nil), " + defaultConstant + "...)"
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "false"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return `""`
+	case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "nil"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		obj := g.ObjectNamed(field.GetTypeName())
+		var enum *EnumDescriptor
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			// The enum type has been publicly imported.
+			enum, _ = id.o.(*EnumDescriptor)
+		} else {
+			enum, _ = obj.(*EnumDescriptor)
+		}
+		if enum == nil {
+			log.Printf("don't know how to generate getter for %s", field.GetName())
+			return "nil"
+		}
+		if len(enum.Value) == 0 {
+			return "0 // empty enum"
+		}
+		first := enum.Value[0].GetName()
+		return g.DefaultPackageName(obj) + enum.prefix() + first
+	default:
+		return "0"
+	}
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+	return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them.
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant, a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are however handled separately.
+//
+// simpleField - a field that is neither weak nor a oneof, possibly repeated
+// oneofField - a field containing a list of subfields:
+// - oneofSubField - a field within the oneof
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+	goName  string      // Go struct name of the message, e.g. MessageName
+	message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+	goName     string // Go name of field, e.g. "FieldName" or "Descriptor_"
+	protoName  string // Name of field in proto language, e.g. "field_name" or "descriptor"
+	getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+	goType     string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+	tags       string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+	fullPath   string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+	return f.protoName
+}
+
+// getGoType returns the Go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+	return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	deprecated    string                               // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+	comment       string                               // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+	g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+	star := ""
+	tname := f.goType
+	if needsStar(f.protoType) && tname[0] == '*' {
+		tname = tname[1:]
+		star = "*"
+	}
+	if f.deprecated != "" {
+		g.P(f.deprecated)
+	}
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+	if f.getterDef == "nil" { // Simpler getter
+		g.P("if m != nil {")
+		g.P("return m." + f.goName)
+		g.P("}")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	if mc.message.proto3() {
+		g.P("if m != nil {")
+	} else {
+		g.P("if m != nil && m." + f.goName + " != nil {")
+	}
+	g.P("return " + star + "m." + f.goName)
+	g.P("}")
+	g.P("return ", f.getterDef)
+	g.P("}")
+	g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+	// No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+}
+
+// wireTypeName returns a textual wire type, needed for oneof sub fields in generated code.
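+// For example, TYPE_DOUBLE maps to "WireFixed64" and TYPE_STRING to "WireBytes".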
+func (f *oneofSubField) wireTypeName() string {
+	switch f.protoType {
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return "WireFixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return "WireFixed32"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return "WireStartGroup"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_STRING,
+		descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "WireBytes"
+	default: // All others are Varints
+		return "WireVarint"
+	}
+}
+
+// typedNil prints a nil casted to the pointer to this field.
+// - for XXX_OneofFuncs
+func (f *oneofSubField) typedNil(g *Generator) {
+	g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// marshalCase prints the case matching this oneof subfield in the marshalling code.
+func (f *oneofSubField) marshalCase(g *Generator) {
+	g.P("case *", f.oneofTypeName, ":")
+	wire := f.wireTypeName()
+	var pre, post string
+	val := "x." + f.goName // overridden for TYPE_BOOL
+	switch f.protoType {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		pre = "b.EncodeFixed64(" + g.Pkg["math"] + ".Float64bits("
+		post = "))"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		pre = "b.EncodeFixed32(uint64(" + g.Pkg["math"] + ".Float32bits("
+		post = ")))"
+	case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64:
+		pre, post = "b.EncodeVarint(uint64(", "))"
+	case descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM:
+		pre, post = "b.EncodeVarint(uint64(", "))"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		pre, post = "b.EncodeFixed64(uint64(", "))"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		pre, post = "b.EncodeFixed32(uint64(", "))"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		g.P("t := uint64(0)")
+		g.P("if ", val, " { t = 1 }")
+		val = "t"
+		pre, post = "b.EncodeVarint(", ")"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		pre, post = "b.EncodeStringBytes(", ")"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		pre, post = "b.Marshal(", ")"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		pre, post = "b.EncodeMessage(", ")"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		pre, post = "b.EncodeRawBytes(", ")"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		pre, post = "b.EncodeZigzag32(uint64(", "))"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		pre, post = "b.EncodeZigzag64(uint64(", "))"
+	default:
+		g.Fail("unhandled oneof field type ", f.protoType.String())
+	}
+	g.P("b.EncodeVarint(", f.fieldNumber, "<<3|", g.Pkg["proto"], ".", wire, ")")
+	if t := f.protoType; t != descriptor.FieldDescriptorProto_TYPE_GROUP && t != descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+		g.P(pre, val, post)
+	} else {
+		g.P("if err := ", pre, val, post, "; err != nil {")
+		g.P("return err")
+		g.P("}")
+	}
+	if f.protoType == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		g.P("b.EncodeVarint(", f.fieldNumber, "<<3|", g.Pkg["proto"], ".WireEndGroup)")
+	}
+}
+
+// unmarshalCase prints the case matching this oneof subfield in the unmarshalling code.
+func (f *oneofSubField) unmarshalCase(g *Generator, origOneofName string, oneofName string) {
+	g.P("case ", f.fieldNumber, ": // ", origOneofName, ".", f.getProtoName())
+	g.P("if wire != ", g.Pkg["proto"], ".", f.wireTypeName(), " {")
+	g.P("return true, ", g.Pkg["proto"], ".ErrInternalBadWireType")
+	g.P("}")
+	lhs := "x, err" // overridden for TYPE_MESSAGE and TYPE_GROUP
+	var dec, cast, cast2 string
+	switch f.protoType {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		dec, cast = "b.DecodeFixed64()", g.Pkg["math"]+".Float64frombits"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		dec, cast, cast2 = "b.DecodeFixed32()", "uint32", g.Pkg["math"]+".Float32frombits"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		dec, cast = "b.DecodeVarint()", "int64"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		dec = "b.DecodeVarint()"
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		dec, cast = "b.DecodeVarint()", "int32"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		dec = "b.DecodeFixed64()"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		dec, cast = "b.DecodeFixed32()", "uint32"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		dec = "b.DecodeVarint()"
+		// handled specially below
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		dec = "b.DecodeStringBytes()"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		g.P("msg := new(", f.goType[1:], ")") // drop star
+		lhs = "err"
+		dec = "b.DecodeGroup(msg)"
+		// handled specially below
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		g.P("msg := new(", f.goType[1:], ")") // drop star
+		lhs = "err"
+		dec = "b.DecodeMessage(msg)"
+		// handled specially below
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		dec = "b.DecodeRawBytes(true)"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		dec, cast = "b.DecodeVarint()", "uint32"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		dec, cast = "b.DecodeVarint()", f.goType
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		dec, cast = "b.DecodeFixed32()", "int32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		dec, cast = "b.DecodeFixed64()", "int64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		dec, cast = "b.DecodeZigzag32()", "int32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		dec, cast = "b.DecodeZigzag64()", "int64"
+	default:
+		g.Fail("unhandled oneof field type ", f.protoType.String())
+	}
+	g.P(lhs, " := ", dec)
+	val := "x"
+	if cast != "" {
+		val = cast + "(" + val + ")"
+	}
+	if cast2 != "" {
+		val = cast2 + "(" + val + ")"
+	}
+	switch f.protoType {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		val += " != 0"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP,
+		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		val = "msg"
+	}
+	g.P("m.", oneofName, " = &", f.oneofTypeName, "{", val, "}")
+	g.P("return true, err")
+}
+
+// sizerCase prints the case matching this oneof subfield in the sizer code.
+func (f *oneofSubField) sizerCase(g *Generator) {
+	g.P("case *", f.oneofTypeName, ":")
+	val := "x." + f.goName
+	var varint, fixed string
+	switch f.protoType {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		fixed = "8"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		fixed = "4"
+	case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM:
+		varint = val
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		fixed = "8"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		fixed = "4"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		fixed = "1"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		fixed = "len(" + val + ")"
+		varint = fixed
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		fixed = g.Pkg["proto"] + ".Size(" + val + ")"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		g.P("s := ", g.Pkg["proto"], ".Size(", val, ")")
+		fixed = "s"
+		varint = fixed
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		fixed = "len(" + val + ")"
+		varint = fixed
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))"
+	default:
+		g.Fail("unhandled oneof field type ", f.protoType.String())
+	}
+	// Tag and wire varint is known statically,
+	// so don't generate code for that part of the size computation.
+	tagAndWireSize := proto.SizeVarint(uint64(f.fieldNumber << 3)) // wire doesn't affect varint size
+	g.P("n += ", tagAndWireSize, " // tag and wire")
+	if varint != "" {
+		g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))")
+	}
+	if fixed != "" {
+		g.P("n += ", fixed)
+	}
+	if f.protoType == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		g.P("n += ", tagAndWireSize, " // tag and wire")
+	}
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofField represents the oneof on top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+	fieldCommon
+	subFields []*oneofSubField // All the possible oneof fields
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+	comment := f.comment
+	for _, sf := range f.subFields {
+		comment += "//\t*" + sf.oneofTypeName + "\n"
+	}
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field prints the discriminator interface and wrapper types for the oneof,
+// as well as the getters for all the sub fields.
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+	// The discriminator type
+	g.P("type ", f.goType, " interface {")
+	g.P(f.goType, "()")
+	g.P("}")
+	g.P()
+	// The subField types, fulfilling the discriminator type contract
+	for _, sf := range f.subFields {
+		g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+		g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+		g.P("}")
+		g.P()
+	}
+	for _, sf := range f.subFields {
+		g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+		g.P()
+	}
+	// Getter for the oneof field
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+	g.P("if m != nil { return m.", f.goName, " }")
+	g.P("return nil")
+	g.P("}")
+	g.P()
+	// Getters for each oneof sub field
+	for _, sf := range f.subFields {
+		g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+		g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+		g.P("return x.", sf.goName)
+		g.P("}")
+		g.P("return ", sf.getterDef)
+		g.P("}")
+		g.P()
+	}
+}
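+
+// For a hypothetical message Msg declaring `oneof contact { string email = 1; }`,
+// the code emitted above looks roughly like:
+//
+//	type isMsg_Contact interface {
+//		isMsg_Contact()
+//	}
+//
+//	type Msg_Email struct {
+//		Email string // struct tag elided
+//	}
+//
+//	func (*Msg_Email) isMsg_Contact() {}
+//
+//	func (m *Msg) GetContact() isMsg_Contact {
+//		if m != nil {
+//			return m.Contact
+//		}
+//		return nil
+//	}
+//
+//	func (m *Msg) GetEmail() string {
+//		if x, ok := m.GetContact().(*Msg_Email); ok {
+//			return x.Email
+//		}
+//		return ""
+//	}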
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+	// No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
+	getter(g *Generator, mc *msgCtx) // print getter
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g. "yoshi" or "5"
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
+	getGoType() string                                  // go type of the field  as a string, e.g. "*int32"
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed, which is only if the default value is
+// explicitly stated in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+	// Collect fields that can have defaults
+	dFields := []defField{}
+	for _, pf := range topLevelFields {
+		if f, ok := pf.(*oneofField); ok {
+			for _, osf := range f.subFields {
+				dFields = append(dFields, osf)
+			}
+			continue
+		}
+		dFields = append(dFields, pf.(defField))
+	}
+	for _, df := range dFields {
+		def := df.getProtoDef()
+		if def == "" {
+			continue
+		}
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+		typename := df.getGoType()
+		if typename[0] == '*' {
+			typename = typename[1:]
+		}
+		kind := "const "
+		switch {
+		case typename == "bool":
+		case typename == "string":
+			def = strconv.Quote(def)
+		case typename == "[]byte":
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+			kind = "var "
+		case def == "inf", def == "-inf", def == "nan":
+			// These names are known to, and defined by, the protocol language.
+			switch def {
+			case "inf":
+				def = "math.Inf(1)"
+			case "-inf":
+				def = "math.Inf(-1)"
+			case "nan":
+				def = "math.NaN()"
+			}
+			if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+				def = "float32(" + def + ")"
+			}
+			kind = "var "
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// Must be an enum.  Need to construct the prefixed name.
+			obj := g.ObjectNamed(df.getProtoTypeName())
+			var enum *EnumDescriptor
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// The enum type has been publicly imported.
+				enum, _ = id.o.(*EnumDescriptor)
+			} else {
+				enum, _ = obj.(*EnumDescriptor)
+			}
+			if enum == nil {
+				log.Printf("don't know how to generate constant for %s", fieldname)
+				continue
+			}
+			def = g.DefaultPackageName(obj) + enum.prefix() + def
+		}
+		g.P(kind, fieldname, " ", typename, " = ", def)
+		g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+	}
+	g.P()
+}
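+
+// For a hypothetical proto2 field `optional int32 port = 1 [default = 8080];`
+// in a message Conf, the output of generateDefaultConstants is roughly:
+//
+//	const Default_Conf_Port int32 = 8080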
+
+// generateInternalStructFields just adds the XXX_<something> fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+	g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+	if len(mc.message.ExtensionRange) > 0 {
+		messageset := ""
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+			messageset = "protobuf_messageset:\"1\" "
+		}
+		g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+	}
+	g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+	g.P("XXX_sizecache\tint32 `json:\"-\"`")
+}
+
+// generateOneofFuncs adds all the utility functions for oneofs, including the marshaler, unmarshaler, and sizer.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+	ofields := []*oneofField{}
+	for _, f := range topLevelFields {
+		if o, ok := f.(*oneofField); ok {
+			ofields = append(ofields, o)
+		}
+	}
+	if len(ofields) == 0 {
+		return
+	}
+	enc := "_" + mc.goName + "_OneofMarshaler"
+	dec := "_" + mc.goName + "_OneofUnmarshaler"
+	size := "_" + mc.goName + "_OneofSizer"
+	encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error"
+	decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)"
+	sizeSig := "(msg " + g.Pkg["proto"] + ".Message) (n int)"
+
+	// OneofFuncs
+	g.P("// XXX_OneofFuncs is for the internal use of the proto package.")
+	g.P("func (*", mc.goName, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {")
+	g.P("return ", enc, ", ", dec, ", ", size, ", []interface{}{")
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			sf.typedNil(g)
+		}
+	}
+	g.P("}")
+	g.P("}")
+	g.P()
+
+	// marshaler
+	g.P("func ", enc, encSig, " {")
+	g.P("m := msg.(*", mc.goName, ")")
+	for _, of := range ofields {
+		g.P("// ", of.getProtoName())
+		g.P("switch x := m.", of.goName, ".(type) {")
+		for _, sf := range of.subFields {
+			// also fills in field.wire
+			sf.marshalCase(g)
+		}
+		g.P("case nil:")
+		g.P("default:")
+		g.P(" return ", g.Pkg["fmt"], `.Errorf("`, mc.goName, ".", of.goName, ` has unexpected type %T", x)`)
+		g.P("}")
+	}
+	g.P("return nil")
+	g.P("}")
+	g.P()
+
+	// unmarshaler
+	g.P("func ", dec, decSig, " {")
+	g.P("m := msg.(*", mc.goName, ")")
+	g.P("switch tag {")
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			sf.unmarshalCase(g, of.getProtoName(), of.goName)
+		}
+	}
+	g.P("default:")
+	g.P("return false, nil")
+	g.P("}")
+	g.P("}")
+	g.P()
+
+	// sizer
+	g.P("func ", size, sizeSig, " {")
+	g.P("m := msg.(*", mc.goName, ")")
+	for _, of := range ofields {
+		g.P("// ", of.getProtoName())
+		g.P("switch x := m.", of.goName, ".(type) {")
+		for _, sf := range of.subFields {
+			// also fills in field.wire
+			sf.sizerCase(g)
+		}
+		g.P("case nil:")
+		g.P("default:")
+		g.P("panic(", g.Pkg["fmt"], ".Sprintf(\"proto: unexpected type %T in oneof\", x))")
+		g.P("}")
+	}
+	g.P("return n")
+	g.P("}")
+	g.P()
+}
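+
+// For a hypothetical message Msg whose only oneof wrapper type is Msg_Email,
+// the registration hook emitted above looks roughly like:
+//
+//	func (*Msg) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error,
+//		func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error),
+//		func(msg proto.Message) (n int), []interface{}) {
+//		return _Msg_OneofMarshaler, _Msg_OneofUnmarshaler, _Msg_OneofSizer,
+//			[]interface{}{(*Msg_Email)(nil)}
+//	}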
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+	comments := g.PrintComments(mc.message.path)
+
+	// Guarantee deprecation comments appear after user-provided comments.
+	if mc.message.GetOptions().GetDeprecated() {
+		if comments {
+			// Convention: Separate deprecation comments from original
+			// comments with an empty line.
+			g.P("//")
+		}
+		g.P(deprecationComment)
+	}
+
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+	for _, pf := range topLevelFields {
+		pf.decl(g, mc)
+	}
+	g.generateInternalStructFields(mc, topLevelFields)
+	g.P("}")
+}
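+
+// For a hypothetical declaration `message Ping { string host = 1; }`, the
+// struct emitted above looks roughly like (field tags abbreviated):
+//
+//	type Ping struct {
+//		Host                 string   `protobuf:"bytes,1,opt,name=host" json:"host,omitempty"`
+//		XXX_NoUnkeyedLiteral struct{} `json:"-"`
+//		XXX_unrecognized     []byte   `json:"-"`
+//		XXX_sizecache        int32    `json:"-"`
+//	}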
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.getter(g, mc)
+	}
+}
+
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.setter(g, mc)
+	}
+}
+
+// generateCommonMethods adds methods to the message that are not on a per field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+	// Reset, String and ProtoMessage methods.
+	g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
+	g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+	g.P("func (*", mc.goName, ") ProtoMessage() {}")
+	var indexes []string
+	for m := mc.message; m != nil; m = m.parent {
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	// TODO: Revisit the decision to use a XXX_WellKnownType method
+	// if we change proto.MessageName to work with multiple equivalents.
+	if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
+		g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
+	}
+
+	// Extension support methods
+	if len(mc.message.ExtensionRange) > 0 {
+		// message_set_wire_format only makes sense when extensions are defined.
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+			g.P()
+			g.P("func (m *", mc.goName, ") MarshalJSON() ([]byte, error) {")
+			g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)")
+			g.P("}")
+			g.P("func (m *", mc.goName, ") UnmarshalJSON(buf []byte) error {")
+			g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)")
+			g.P("}")
+		}
+
+		g.P()
+		g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+		for _, r := range mc.message.ExtensionRange {
+			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+			g.P("{Start: ", r.Start, ", End: ", end, "},")
+		}
+		g.P("}")
+		g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+		g.P("return extRange_", mc.goName)
+		g.P("}")
+	}
+
+	// TODO: It does not scale to keep adding another method for every
+	// operation on protos that we want to switch over to using the
+	// table-driven approach. Instead, we should only add a single method
+	// that allows getting access to the *InternalMessageInfo struct and then
+	// calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
+
+	// Wrapper for table-driven marshaling and unmarshaling.
+	g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
+	g.P("}")
+
+	g.P("func (dst *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
+	g.P("xxx_messageInfo_", mc.goName, ".Merge(dst, src)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
+	g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
+	g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
+	g.P("}")
+
+	g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
+	g.P()
+}
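+
+// For a hypothetical message Ping, the boilerplate emitted above begins
+// roughly with:
+//
+//	func (m *Ping) Reset()         { *m = Ping{} }
+//	func (m *Ping) String() string { return proto.CompactTextString(m) }
+//	func (*Ping) ProtoMessage()    {}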
+
+// Generate the type, methods and default constant definitions for this Descriptor.
+func (g *Generator) generateMessage(message *Descriptor) {
+	topLevelFields := []topLevelField{}
+	oFields := make(map[int32]*oneofField)
+	// The full type name
+	typeName := message.TypeName()
+	// The full type name, CamelCased.
+	goTypeName := CamelCaseSlice(typeName)
+
+	usedNames := make(map[string]bool)
+	for _, n := range methodNames {
+		usedNames[n] = true
+	}
+
+	// allocNames finds a conflict-free variation of the given strings,
+	// consistently mutating their suffixes.
+	// It returns the same number of strings.
+	allocNames := func(ns ...string) []string {
+	Loop:
+		for {
+			for _, n := range ns {
+				if usedNames[n] {
+					for i := range ns {
+						ns[i] += "_"
+					}
+					continue Loop
+				}
+			}
+			for _, n := range ns {
+				usedNames[n] = true
+			}
+			return ns
+		}
+	}
+
+	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
+
+	// Build a structure more suitable for generating the text in one pass
+	for i, field := range message.Field {
+		// Allocate the getter and the field at the same time so name
+		// collisions create field/method consistent names.
+		// TODO: This allocation occurs based on the order of the fields
+		// in the proto file, meaning that a change in the field
+		// ordering can change generated Method/Field names.
+		base := CamelCase(*field.Name)
+		ns := allocNames(base, "Get"+base)
+		fieldName, fieldGetterName := ns[0], ns[1]
+		typename, wiretype := g.GoType(message, field)
+		jsonName := *field.Name
+		tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
+
+		oneof := field.OneofIndex != nil
+		if oneof && oFields[*field.OneofIndex] == nil {
+			odp := message.OneofDecl[int(*field.OneofIndex)]
+			base := CamelCase(odp.GetName())
+			names := allocNames(base, "Get"+base)
+			fname, gname := names[0], names[1]
+
+			// This is the first field of a oneof we haven't seen before.
+			// Generate the union field.
+			oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
+			c, ok := g.makeComments(oneofFullPath)
+			if ok {
+				c += "\n//\n"
+			}
+			c += "// Types that are valid to be assigned to " + fname + ":\n"
+			// Generate the rest of this comment later,
+			// when we've computed any disambiguation.
+
+			dname := "is" + goTypeName + "_" + fname
+			tag := `protobuf_oneof:"` + odp.GetName() + `"`
+			of := oneofField{
+				fieldCommon: fieldCommon{
+					goName:     fname,
+					getterName: gname,
+					goType:     dname,
+					tags:       tag,
+					protoName:  odp.GetName(),
+					fullPath:   oneofFullPath,
+				},
+				comment: c,
+			}
+			topLevelFields = append(topLevelFields, &of)
+			oFields[*field.OneofIndex] = &of
+		}
+
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+			desc := g.ObjectNamed(field.GetTypeName())
+			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+				// Figure out the Go types and tags for the key and value types.
+				keyField, valField := d.Field[0], d.Field[1]
+				keyType, keyWire := g.GoType(d, keyField)
+				valType, valWire := g.GoType(d, valField)
+				keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
+
+				// We don't use stars, except for message-typed values.
+				// Message and enum types are the only two possibly foreign types used in maps,
+				// so record their use. They are not permitted as map keys.
+				keyType = strings.TrimPrefix(keyType, "*")
+				switch *valField.Type {
+				case descriptor.FieldDescriptorProto_TYPE_ENUM:
+					valType = strings.TrimPrefix(valType, "*")
+					g.RecordTypeUse(valField.GetTypeName())
+				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+					g.RecordTypeUse(valField.GetTypeName())
+				default:
+					valType = strings.TrimPrefix(valType, "*")
+				}
+
+				typename = fmt.Sprintf("map[%s]%s", keyType, valType)
+				mapFieldTypes[field] = typename // record for the getter generation
+
+				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
+			}
+		}
+
+		dvalue := g.getterDefault(field, goTypeName)
+		if oneof {
+			tname := goTypeName + "_" + fieldName
+			// It is possible for this to collide with a message or enum
+			// nested in this message. Check for collisions.
+			for {
+				ok := true
+				for _, desc := range message.nested {
+					if CamelCaseSlice(desc.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				for _, enum := range message.enums {
+					if CamelCaseSlice(enum.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				if !ok {
+					tname += "_"
+					continue
+				}
+				break
+			}
+
+			oneofField := oFields[*field.OneofIndex]
+			tag := "protobuf:" + g.goTag(message, field, wiretype)
+			sf := oneofSubField{
+				fieldCommon: fieldCommon{
+					goName:     fieldName,
+					getterName: fieldGetterName,
+					goType:     typename,
+					tags:       tag,
+					protoName:  field.GetName(),
+					fullPath:   fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
+				},
+				protoTypeName: field.GetTypeName(),
+				fieldNumber:   int(*field.Number),
+				protoType:     *field.Type,
+				getterDef:     dvalue,
+				protoDef:      field.GetDefaultValue(),
+				oneofTypeName: tname,
+			}
+			oneofField.subFields = append(oneofField.subFields, &sf)
+			g.RecordTypeUse(field.GetTypeName())
+			continue
+		}
+
+		fieldDeprecated := ""
+		if field.GetOptions().GetDeprecated() {
+			fieldDeprecated = deprecationComment
+		}
+
+		fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
+		c, ok := g.makeComments(fieldFullPath)
+		if ok {
+			c += "\n"
+		}
+		rf := simpleField{
+			fieldCommon: fieldCommon{
+				goName:     fieldName,
+				getterName: fieldGetterName,
+				goType:     typename,
+				tags:       tag,
+				protoName:  field.GetName(),
+				fullPath:   fieldFullPath,
+			},
+			protoTypeName: field.GetTypeName(),
+			protoType:     *field.Type,
+			deprecated:    fieldDeprecated,
+			getterDef:     dvalue,
+			protoDef:      field.GetDefaultValue(),
+			comment:       c,
+		}
+		var pf topLevelField = &rf
+
+		topLevelFields = append(topLevelFields, pf)
+		g.RecordTypeUse(field.GetTypeName())
+	}
+
+	mc := &msgCtx{
+		goName:  goTypeName,
+		message: message,
+	}
+
+	g.generateMessageStruct(mc, topLevelFields)
+	g.P()
+	g.generateCommonMethods(mc)
+	g.P()
+	g.generateDefaultConstants(mc, topLevelFields)
+	g.P()
+	g.generateGetters(mc, topLevelFields)
+	g.P()
+	g.generateSetters(mc, topLevelFields)
+	g.P()
+	g.generateOneofFuncs(mc, topLevelFields)
+	g.P()
+
+	if !message.group {
+		var oneofTypes []string
+		for _, f := range topLevelFields {
+			if of, ok := f.(*oneofField); ok {
+				for _, osf := range of.subFields {
+					oneofTypes = append(oneofTypes, osf.oneofTypeName)
+				}
+			}
+		}
+
+		opts := message.Options
+		ms := &messageSymbol{
+			sym:           goTypeName,
+			hasExtensions: len(message.ExtensionRange) > 0,
+			isMessageSet:  opts != nil && opts.GetMessageSetWireFormat(),
+			oneofTypes:    oneofTypes,
+		}
+		g.file.addExport(message, ms)
+	}
+
+	for _, ext := range message.ext {
+		g.generateExtension(ext)
+	}
+
+	fullName := strings.Join(message.TypeName(), ".")
+	if g.file.Package != nil {
+		fullName = *g.file.Package + "." + fullName
+	}
+
+	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+	// Register types for native map types.
+	for _, k := range mapFieldKeys(mapFieldTypes) {
+		fullName := strings.TrimPrefix(*k.TypeName, ".")
+		g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+	}
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int           { return len(a) }
+func (a byTypeName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+	keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Sort(byTypeName(keys))
+	return keys
+}
+
+var escapeChars = [256]byte{
+	'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
+func unescape(s string) string {
+	// NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+	// single and double quotes, but strconv.Unquote only allows one or the
+	// other (based on actual surrounding quotes of its input argument).
+
+	var out []byte
+	for len(s) > 0 {
+		// regular character, or too short to be valid escape
+		if s[0] != '\\' || len(s) < 2 {
+			out = append(out, s[0])
+			s = s[1:]
+		} else if c := escapeChars[s[1]]; c != 0 {
+			// escape sequence
+			out = append(out, c)
+			s = s[2:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			// hex escape, e.g. "\x80"
+			if len(s) < 4 {
+				// too short to be valid
+				out = append(out, s[:2]...)
+				s = s[2:]
+				continue
+			}
+			v, err := strconv.ParseUint(s[2:4], 16, 8)
+			if err != nil {
+				out = append(out, s[:4]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[4:]
+		} else if '0' <= s[1] && s[1] <= '7' {
+			// octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+			// so consume up to 2 more bytes or up to end-of-string
+			n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+			if n > 3 {
+				n = 3
+			}
+			v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[1+n:]
+		} else {
+			// bad escape, just propagate the slash as-is
+			out = append(out, s[0])
+			s = s[1:]
+		}
+	}
+
+	return string(out)
+}
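+
+// For example, combining hex, octal, and single-character escapes:
+//
+//	unescape(`\x41\101\n`) == "AA\n"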
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+	ccTypeName := ext.DescName()
+
+	extObj := g.ObjectNamed(*ext.Extendee)
+	var extDesc *Descriptor
+	if id, ok := extObj.(*ImportedDescriptor); ok {
+		// This is extending a publicly imported message.
+		// We need the underlying type for goTag.
+		extDesc = id.o.(*Descriptor)
+	} else {
+		extDesc = extObj.(*Descriptor)
+	}
+	extendedType := "*" + g.TypeName(extObj) // always use the original
+	field := ext.FieldDescriptorProto
+	fieldType, wireType := g.GoType(ext.parent, field)
+	tag := g.goTag(extDesc, field, wireType)
+	g.RecordTypeUse(*ext.Extendee)
+	if n := ext.FieldDescriptorProto.TypeName; n != nil {
+		// foreign extension type
+		g.RecordTypeUse(*n)
+	}
+
+	typeName := ext.TypeName()
+
+	// Special case for proto2 message sets: If this extension is extending
+	// proto2.bridge.MessageSet, and its final name component is "message_set_extension",
+	// then drop that last component.
+	//
+	// TODO: This should be implemented in the text formatter rather than the generator.
+	// In addition, the situation for when to apply this special case is implemented
+	// differently in other languages:
+	// https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
+	mset := false
+	if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
+		typeName = typeName[:len(typeName)-1]
+		mset = true
+	}
+
+	// For text formatting, the package must be exactly what the .proto file declares,
+	// ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+	extName := strings.Join(typeName, ".")
+	if g.file.Package != nil {
+		extName = *g.file.Package + "." + extName
+	}
+
+	g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+	g.P("ExtendedType: (", extendedType, ")(nil),")
+	g.P("ExtensionType: (", fieldType, ")(nil),")
+	g.P("Field: ", field.Number, ",")
+	g.P(`Name: "`, extName, `",`)
+	g.P("Tag: ", tag, ",")
+	g.P(`Filename: "`, g.file.GetName(), `",`)
+	g.P("}")
+	g.P()
+
+	if mset {
+		// Generate a bit more code to register with message_set.go.
+		g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName)
+	}
+
+	g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+	for _, enum := range g.file.enum {
+		g.generateEnumRegistration(enum)
+	}
+	for _, d := range g.file.desc {
+		for _, ext := range d.ext {
+			g.generateExtensionRegistration(ext)
+		}
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtensionRegistration(ext)
+	}
+	if len(g.init) == 0 {
+		return
+	}
+	g.P("func init() {")
+	for _, l := range g.init {
+		g.P(l)
+	}
+	g.P("}")
+	g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+	// Make a copy and trim source_code_info data.
+	// TODO: Trim this more when we know exactly what we need.
+	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+	pb.SourceCodeInfo = nil
+
+	b, err := proto.Marshal(pb)
+	if err != nil {
+		g.Fail(err.Error())
+	}
+
+	var buf bytes.Buffer
+	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+	w.Write(b)
+	w.Close()
+	b = buf.Bytes()
+
+	v := file.VarName()
+	g.P()
+	g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+	g.P("var ", v, " = []byte{")
+	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+	for len(b) > 0 {
+		n := 16
+		if n > len(b) {
+			n = len(b)
+		}
+
+		s := ""
+		for _, c := range b[:n] {
+			s += fmt.Sprintf("0x%02x,", c)
+		}
+		g.P(s)
+
+		b = b[n:]
+	}
+	g.P("}")
+}
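+
+// The output of generateFileDescriptor looks roughly like (variable name and
+// byte values are illustrative):
+//
+//	func init() { proto.RegisterFile("ping.proto", fileDescriptor_ping) }
+//	var fileDescriptor_ping = []byte{
+//		// 97 bytes of a gzipped FileDescriptorProto
+//		0x1f, 0x8b, 0x08, /* ... */
+//	}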
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
+	pkg := enum.File().GetPackage()
+	if pkg != "" {
+		pkg += "."
+	}
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
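+
+// For a hypothetical enum Color in proto package pkg, this queues roughly:
+//
+//	proto.RegisterEnum("pkg.Color", Color_name, Color_value)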
+
+func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) {
+	g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool {
+	return 'a' <= c && c <= 'z'
+}
+
+// Is c an ASCII digit?
+func isASCIIDigit(c byte) bool {
+	return '0' <= c && c <= '9'
+}
+
+// CamelCase returns the CamelCased name.
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+// There is a remote possibility of this rewrite causing a name collision,
+// but it's so remote we're prepared to pretend it's nonexistent - since the
+// C++ generator lowercases names, it's extremely unlikely to have two fields
+// with different capitalizations.
+// In short, _my_field_name_2 becomes XMyFieldName_2.
+func CamelCase(s string) string {
+	if s == "" {
+		return ""
+	}
+	t := make([]byte, 0, 32)
+	i := 0
+	if s[0] == '_' {
+		// Need a capital letter; drop the '_'.
+		t = append(t, 'X')
+		i++
+	}
+	// Invariant: if the next letter is lower case, it must be converted
+	// to upper case.
+	// That is, we process a word at a time, where words are marked by _ or
+	// upper case letter. Digits are treated as words.
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
+			continue // Skip the underscore in s.
+		}
+		if isASCIIDigit(c) {
+			t = append(t, c)
+			continue
+		}
+		// Assume we have a letter now - if not, it's a bogus identifier.
+		// The next word is a sequence of characters that must start upper case.
+		if isASCIILower(c) {
+			c ^= ' ' // Make it a capital letter.
+		}
+		t = append(t, c) // Guaranteed not lower case.
+		// Accept lower case sequence that follows.
+		for i+1 < len(s) && isASCIILower(s[i+1]) {
+			i++
+			t = append(t, s[i])
+		}
+	}
+	return string(t)
+}
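+
+// For example:
+//
+//	CamelCase("my_field_name_2") == "MyFieldName_2"
+//	CamelCase("_my_field_name_2") == "XMyFieldName_2"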
+
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
+// be joined with "_".
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
+
+// dottedSlice turns a sliced name into a dotted name.
+func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
+
+// Is this field optional?
+func isOptional(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+// Is this field required?
+func isRequired(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// Is this field repeated?
+func isRepeated(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// Is this field a scalar numeric type?
+func isScalar(field *descriptor.FieldDescriptorProto) bool {
+	if field.Type == nil {
+		return false
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return true
+	default:
+		return false
+	}
+}
+
+// badToUnderscore is the mapping function used to generate Go names from package names,
+// which can be dotted in the input .proto file.  It replaces non-identifier characters such as
+// dot or dash with underscore.
+func badToUnderscore(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+		return r
+	}
+	return '_'
+}
+
+// baseName returns the last path element of the name, with the last dotted suffix removed.
+func baseName(name string) string {
+	// First, find the last element
+	if i := strings.LastIndex(name, "/"); i >= 0 {
+		name = name[i+1:]
+	}
+	// Now drop the suffix
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		name = name[0:i]
+	}
+	return name
+}
+
+// The SourceCodeInfo message describes the location of elements of a parsed
+// .proto file by way of a "path", which is a sequence of integers that
+// describe the route from a FileDescriptorProto to the relevant submessage.
+// The path alternates between a field number of a repeated field, and an index
+// into that repeated field. The constants below define the field numbers that
+// are used.
+//
+// See descriptor.proto for more information about this.
+const (
+	// tag numbers in FileDescriptorProto
+	packagePath = 2 // package
+	messagePath = 4 // message_type
+	enumPath    = 5 // enum_type
+	// tag numbers in DescriptorProto
+	messageFieldPath   = 2 // field
+	messageMessagePath = 3 // nested_type
+	messageEnumPath    = 4 // enum_type
+	messageOneofPath   = 8 // oneof_decl
+	// tag numbers in EnumDescriptorProto
+	enumValuePath = 2 // value
+)
+
+var supportTypeAliases bool
+
+func init() {
+	for _, tag := range build.Default.ReleaseTags {
+		if tag == "go1.9" {
+			supportTypeAliases = true
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
new file mode 100644
index 0000000..a9b6103
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
@@ -0,0 +1,117 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+	Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+	key := Location{
+		Pos: pos,
+		End: end,
+	}
+	if loc, ok := m[key]; ok {
+		return loc, true
+	}
+	return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output.  An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+	itok := tokenize(input)
+	otok := tokenize(output)
+	if len(itok) != len(otok) {
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+	}
+	m := make(Map)
+	for i, ti := range itok {
+		to := otok[i]
+		if ti.Token != to.Token {
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+		}
+		m.add(ti.pos, ti.end, to.pos, to.end)
+	}
+	return m, nil
+}
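+
+// A minimal usage sketch, assuming output is a reformatted variant of input
+// and pos/end are byte offsets of a token in input:
+//
+//	m, err := remap.Compute(input, output)
+//	if err == nil {
+//		if loc, ok := m.Find(pos, end); ok {
+//			// loc spans the same token in the output text
+//		}
+//	}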
+
+// tokinfo records the span and type of a source token.
+type tokinfo struct {
+	pos, end int
+	token.Token
+}
+
+func tokenize(src []byte) []tokinfo {
+	fs := token.NewFileSet()
+	var s scanner.Scanner
+	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
+	var info []tokinfo
+	for {
+		pos, next, lit := s.Scan()
+		switch next {
+		case token.SEMICOLON:
+			continue
+		}
+		info = append(info, tokinfo{
+			pos:   int(pos - 1),
+			end:   int(pos + token.Pos(len(lit)) - 1),
+			Token: next,
+		})
+		if next == token.EOF {
+			break
+		}
+	}
+	return info
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
new file mode 100644
index 0000000..faef1ab
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package grpc outputs gRPC service descriptions in Go code.
+// It runs as a plugin for the Go protocol buffer compiler and
+// is linked into protoc-gen-go.
+package grpc
+
+import (
+	"fmt"
+	"path"
+	"strconv"
+	"strings"
+
+	pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// the grpc package is introduced; the generated code references
+// a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 4
+
+// Paths for packages used by code generated in this file,
+// relative to the import_prefix of the generator.Generator.
+const (
+	contextPkgPath = "golang.org/x/net/context"
+	grpcPkgPath    = "google.golang.org/grpc"
+)
+
+func init() {
+	generator.RegisterPlugin(new(grpc))
+}
+
+// grpc is an implementation of the Go protocol buffer compiler's
+// plugin architecture.  It generates bindings for gRPC support.
+type grpc struct {
+	gen *generator.Generator
+}
+
+// Name returns the name of this plugin, "grpc".
+func (g *grpc) Name() string {
+	return "grpc"
+}
+
+// The names for packages imported in the generated code.
+// They may vary from the final path component of the import path
+// if the name is used by other packages.
+var (
+	contextPkg string
+	grpcPkg    string
+)
+
+// Init initializes the plugin.
+func (g *grpc) Init(gen *generator.Generator) {
+	g.gen = gen
+	contextPkg = generator.RegisterUniquePackageName("context", nil)
+	grpcPkg = generator.RegisterUniquePackageName("grpc", nil)
+}
+
+// Given a type name defined in a .proto, return its object.
+// Also record that we're using it, to guarantee the associated import.
+func (g *grpc) objectNamed(name string) generator.Object {
+	g.gen.RecordTypeUse(name)
+	return g.gen.ObjectNamed(name)
+}
+
+// Given a type name defined in a .proto, return its name as we will print it.
+func (g *grpc) typeName(str string) string {
+	return g.gen.TypeName(g.objectNamed(str))
+}
+
+// P forwards to g.gen.P.
+func (g *grpc) P(args ...interface{}) { g.gen.P(args...) }
+
+// Generate generates code for the services in the given file.
+func (g *grpc) Generate(file *generator.FileDescriptor) {
+	if len(file.FileDescriptorProto.Service) == 0 {
+		return
+	}
+
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
+	g.P("var _ ", contextPkg, ".Context")
+	g.P("var _ ", grpcPkg, ".ClientConn")
+	g.P()
+
+	// Assert version compatibility.
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the grpc package it is being compiled against.")
+	g.P("const _ = ", grpcPkg, ".SupportPackageIsVersion", generatedCodeVersion)
+	g.P()
+
+	for i, service := range file.FileDescriptorProto.Service {
+		g.generateService(file, service, i)
+	}
+}
+
+// GenerateImports generates the import declaration for this file.
+func (g *grpc) GenerateImports(file *generator.FileDescriptor) {
+	if len(file.FileDescriptorProto.Service) == 0 {
+		return
+	}
+	g.P("import (")
+	g.P(contextPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), contextPkgPath)))
+	g.P(grpcPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), grpcPkgPath)))
+	g.P(")")
+	g.P()
+}
+
+// reservedClientName records whether a client name is reserved on the client side.
+var reservedClientName = map[string]bool{
+	// TODO: do we need any in gRPC?
+}
+
+func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] }
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// generateService generates all the code for the named service.
+func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) {
+	path := fmt.Sprintf("6,%d", index) // 6 means service.
+
+	origServName := service.GetName()
+	fullServName := origServName
+	if pkg := file.GetPackage(); pkg != "" {
+		fullServName = pkg + "." + fullServName
+	}
+	servName := generator.CamelCase(origServName)
+	deprecated := service.GetOptions().GetDeprecated()
+
+	g.P()
+	g.P(fmt.Sprintf(`// %sClient is the client API for %s service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName))
+
+	// Client interface.
+	if deprecated {
+		g.P("//")
+		g.P(deprecationComment)
+	}
+	g.P("type ", servName, "Client interface {")
+	for i, method := range service.Method {
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+		g.P(g.generateClientSignature(servName, method))
+	}
+	g.P("}")
+	g.P()
+
+	// Client structure.
+	g.P("type ", unexport(servName), "Client struct {")
+	g.P("cc *", grpcPkg, ".ClientConn")
+	g.P("}")
+	g.P()
+
+	// NewClient factory.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.P("func New", servName, "Client(cc *", grpcPkg, ".ClientConn) ", servName, "Client {")
+	g.P("return &", unexport(servName), "Client{cc}")
+	g.P("}")
+	g.P()
+
+	var methodIndex, streamIndex int
+	serviceDescVar := "_" + servName + "_serviceDesc"
+	// Client method implementations.
+	for _, method := range service.Method {
+		var descExpr string
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
+			// Unary RPC method
+			descExpr = fmt.Sprintf("&%s.Methods[%d]", serviceDescVar, methodIndex)
+			methodIndex++
+		} else {
+			// Streaming RPC method
+			descExpr = fmt.Sprintf("&%s.Streams[%d]", serviceDescVar, streamIndex)
+			streamIndex++
+		}
+		g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr)
+	}
+
+	// Server interface.
+	serverType := servName + "Server"
+	g.P("// ", serverType, " is the server API for ", servName, " service.")
+	if deprecated {
+		g.P("//")
+		g.P(deprecationComment)
+	}
+	g.P("type ", serverType, " interface {")
+	for i, method := range service.Method {
+		g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service.
+		g.P(g.generateServerSignature(servName, method))
+	}
+	g.P("}")
+	g.P()
+
+	// Server registration.
+	if deprecated {
+		g.P(deprecationComment)
+	}
+	g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {")
+	g.P("s.RegisterService(&", serviceDescVar, `, srv)`)
+	g.P("}")
+	g.P()
+
+	// Server handler implementations.
+	var handlerNames []string
+	for _, method := range service.Method {
+		hname := g.generateServerMethod(servName, fullServName, method)
+		handlerNames = append(handlerNames, hname)
+	}
+
+	// Service descriptor.
+	g.P("var ", serviceDescVar, " = ", grpcPkg, ".ServiceDesc{")
+	g.P("ServiceName: ", strconv.Quote(fullServName), ",")
+	g.P("HandlerType: (*", serverType, ")(nil),")
+	g.P("Methods: []", grpcPkg, ".MethodDesc{")
+	for i, method := range service.Method {
+		if method.GetServerStreaming() || method.GetClientStreaming() {
+			continue
+		}
+		g.P("{")
+		g.P("MethodName: ", strconv.Quote(method.GetName()), ",")
+		g.P("Handler: ", handlerNames[i], ",")
+		g.P("},")
+	}
+	g.P("},")
+	g.P("Streams: []", grpcPkg, ".StreamDesc{")
+	for i, method := range service.Method {
+		if !method.GetServerStreaming() && !method.GetClientStreaming() {
+			continue
+		}
+		g.P("{")
+		g.P("StreamName: ", strconv.Quote(method.GetName()), ",")
+		g.P("Handler: ", handlerNames[i], ",")
+		if method.GetServerStreaming() {
+			g.P("ServerStreams: true,")
+		}
+		if method.GetClientStreaming() {
+			g.P("ClientStreams: true,")
+		}
+		g.P("},")
+	}
+	g.P("},")
+	g.P("Metadata: \"", file.GetName(), "\",")
+	g.P("}")
+	g.P()
+}
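+
+// For a hypothetical service Store in proto package pkg with one unary
+// method Get, the descriptor emitted above looks roughly like:
+//
+//	var _Store_serviceDesc = grpc.ServiceDesc{
+//		ServiceName: "pkg.Store",
+//		HandlerType: (*StoreServer)(nil),
+//		Methods: []grpc.MethodDesc{
+//			{MethodName: "Get", Handler: _Store_Get_Handler},
+//		},
+//		Streams:  []grpc.StreamDesc{},
+//		Metadata: "store.proto",
+//	}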
+
+// generateClientSignature returns the client-side signature for a method.
+func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string {
+	origMethName := method.GetName()
+	methName := generator.CamelCase(origMethName)
+	if reservedClientName[methName] {
+		methName += "_"
+	}
+	reqArg := ", in *" + g.typeName(method.GetInputType())
+	if method.GetClientStreaming() {
+		reqArg = ""
+	}
+	respName := "*" + g.typeName(method.GetOutputType())
+	if method.GetServerStreaming() || method.GetClientStreaming() {
+		respName = servName + "_" + generator.CamelCase(origMethName) + "Client"
+	}
+	return fmt.Sprintf("%s(ctx %s.Context%s, opts ...%s.CallOption) (%s, error)", methName, contextPkg, reqArg, grpcPkg, respName)
+}
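+
+// For a hypothetical unary method `rpc Get(Req) returns (Resp);` on service
+// Store this yields roughly:
+//
+//	Get(ctx context.Context, in *Req, opts ...grpc.CallOption) (*Resp, error)
+//
+// For a streaming method the response type is replaced by a
+// Store_<Method>Client stream.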
+
+func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar string, method *pb.MethodDescriptorProto, descExpr string) {
+	sname := fmt.Sprintf("/%s/%s", fullServName, method.GetName())
+	methName := generator.CamelCase(method.GetName())
+	inType := g.typeName(method.GetInputType())
+	outType := g.typeName(method.GetOutputType())
+
+	if method.GetOptions().GetDeprecated() {
+		g.P(deprecationComment)
+	}
+	g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{")
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		g.P("out := new(", outType, ")")
+		// TODO: Pass descExpr to Invoke.
+		g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`)
+		g.P("if err != nil { return nil, err }")
+		g.P("return out, nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	streamType := unexport(servName) + methName + "Client"
+	g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`)
+	g.P("if err != nil { return nil, err }")
+	g.P("x := &", streamType, "{stream}")
+	if !method.GetClientStreaming() {
+		g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }")
+		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
+	}
+	g.P("return x, nil")
+	g.P("}")
+	g.P()
+
+	genSend := method.GetClientStreaming()
+	genRecv := method.GetServerStreaming()
+	genCloseAndRecv := !method.GetServerStreaming()
+
+	// Stream auxiliary types and methods.
+	g.P("type ", servName, "_", methName, "Client interface {")
+	if genSend {
+		g.P("Send(*", inType, ") error")
+	}
+	if genRecv {
+		g.P("Recv() (*", outType, ", error)")
+	}
+	if genCloseAndRecv {
+		g.P("CloseAndRecv() (*", outType, ", error)")
+	}
+	g.P(grpcPkg, ".ClientStream")
+	g.P("}")
+	g.P()
+
+	g.P("type ", streamType, " struct {")
+	g.P(grpcPkg, ".ClientStream")
+	g.P("}")
+	g.P()
+
+	if genSend {
+		g.P("func (x *", streamType, ") Send(m *", inType, ") error {")
+		g.P("return x.ClientStream.SendMsg(m)")
+		g.P("}")
+		g.P()
+	}
+	if genRecv {
+		g.P("func (x *", streamType, ") Recv() (*", outType, ", error) {")
+		g.P("m := new(", outType, ")")
+		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
+		g.P("return m, nil")
+		g.P("}")
+		g.P()
+	}
+	if genCloseAndRecv {
+		g.P("func (x *", streamType, ") CloseAndRecv() (*", outType, ", error) {")
+		g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }")
+		g.P("m := new(", outType, ")")
+		g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }")
+		g.P("return m, nil")
+		g.P("}")
+		g.P()
+	}
+}
+
+// generateServerSignature returns the server-side signature for a method.
+func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string {
+	origMethName := method.GetName()
+	methName := generator.CamelCase(origMethName)
+	if reservedClientName[methName] {
+		methName += "_"
+	}
+
+	var reqArgs []string
+	ret := "error"
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		reqArgs = append(reqArgs, contextPkg+".Context")
+		ret = "(*" + g.typeName(method.GetOutputType()) + ", error)"
+	}
+	if !method.GetClientStreaming() {
+		reqArgs = append(reqArgs, "*"+g.typeName(method.GetInputType()))
+	}
+	if method.GetServerStreaming() || method.GetClientStreaming() {
+		reqArgs = append(reqArgs, servName+"_"+generator.CamelCase(origMethName)+"Server")
+	}
+
+	return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret
+}
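+
+// For the same hypothetical service, the server-side signatures are roughly:
+//
+//	Get(context.Context, *Req) (*Resp, error)  // unary
+//	Watch(*Req, Store_WatchServer) error       // server-streaming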
+
+func (g *grpc) generateServerMethod(servName, fullServName string, method *pb.MethodDescriptorProto) string {
+	methName := generator.CamelCase(method.GetName())
+	hname := fmt.Sprintf("_%s_%s_Handler", servName, methName)
+	inType := g.typeName(method.GetInputType())
+	outType := g.typeName(method.GetOutputType())
+
+	if !method.GetServerStreaming() && !method.GetClientStreaming() {
+		g.P("func ", hname, "(srv interface{}, ctx ", contextPkg, ".Context, dec func(interface{}) error, interceptor ", grpcPkg, ".UnaryServerInterceptor) (interface{}, error) {")
+		g.P("in := new(", inType, ")")
+		g.P("if err := dec(in); err != nil { return nil, err }")
+		g.P("if interceptor == nil { return srv.(", servName, "Server).", methName, "(ctx, in) }")
+		g.P("info := &", grpcPkg, ".UnaryServerInfo{")
+		g.P("Server: srv,")
+		g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", fullServName, methName)), ",")
+		g.P("}")
+		g.P("handler := func(ctx ", contextPkg, ".Context, req interface{}) (interface{}, error) {")
+		g.P("return srv.(", servName, "Server).", methName, "(ctx, req.(*", inType, "))")
+		g.P("}")
+		g.P("return interceptor(ctx, in, info, handler)")
+		g.P("}")
+		g.P()
+		return hname
+	}
+	streamType := unexport(servName) + methName + "Server"
+	g.P("func ", hname, "(srv interface{}, stream ", grpcPkg, ".ServerStream) error {")
+	if !method.GetClientStreaming() {
+		g.P("m := new(", inType, ")")
+		g.P("if err := stream.RecvMsg(m); err != nil { return err }")
+		g.P("return srv.(", servName, "Server).", methName, "(m, &", streamType, "{stream})")
+	} else {
+		g.P("return srv.(", servName, "Server).", methName, "(&", streamType, "{stream})")
+	}
+	g.P("}")
+	g.P()
+
+	genSend := method.GetServerStreaming()
+	genSendAndClose := !method.GetServerStreaming()
+	genRecv := method.GetClientStreaming()
+
+	// Stream auxiliary types and methods.
+	g.P("type ", servName, "_", methName, "Server interface {")
+	if genSend {
+		g.P("Send(*", outType, ") error")
+	}
+	if genSendAndClose {
+		g.P("SendAndClose(*", outType, ") error")
+	}
+	if genRecv {
+		g.P("Recv() (*", inType, ", error)")
+	}
+	g.P(grpcPkg, ".ServerStream")
+	g.P("}")
+	g.P()
+
+	g.P("type ", streamType, " struct {")
+	g.P(grpcPkg, ".ServerStream")
+	g.P("}")
+	g.P()
+
+	if genSend {
+		g.P("func (x *", streamType, ") Send(m *", outType, ") error {")
+		g.P("return x.ServerStream.SendMsg(m)")
+		g.P("}")
+		g.P()
+	}
+	if genSendAndClose {
+		g.P("func (x *", streamType, ") SendAndClose(m *", outType, ") error {")
+		g.P("return x.ServerStream.SendMsg(m)")
+		g.P("}")
+		g.P()
+	}
+	if genRecv {
+		g.P("func (x *", streamType, ") Recv() (*", inType, ", error) {")
+		g.P("m := new(", inType, ")")
+		g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }")
+		g.P("return m, nil")
+		g.P("}")
+		g.P()
+	}
+
+	return hname
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
new file mode 100644
index 0000000..532a550
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/link_grpc.go
@@ -0,0 +1,34 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package main
+
+import _ "github.com/golang/protobuf/protoc-gen-go/grpc"
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
new file mode 100644
index 0000000..8e2486d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go
@@ -0,0 +1,98 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate
+// Go code.  Run it by building this program and putting it in your path with
+// the name
+// 	protoc-gen-go
+// That word 'go' at the end becomes part of the option string set for the
+// protocol compiler, so once the protocol compiler (protoc) is installed
+// you can run
+// 	protoc --go_out=output_directory input_directory/file.proto
+// to generate Go bindings for the protocol defined by file.proto.
+// With that input, the output will be written to
+// 	output_directory/file.pb.go
+//
+// The generated code is documented in the package comment for
+// the library.
+//
+// See the README and documentation for protocol buffers to learn more:
+// 	https://developers.google.com/protocol-buffers/
+package main
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+)
+
+func main() {
+	// Begin by allocating a generator. The request and response structures are stored there
+	// so we can do error handling easily - the response structure contains the field to
+	// report failure.
+	g := generator.New()
+
+	data, err := ioutil.ReadAll(os.Stdin)
+	if err != nil {
+		g.Error(err, "reading input")
+	}
+
+	if err := proto.Unmarshal(data, g.Request); err != nil {
+		g.Error(err, "parsing input proto")
+	}
+
+	if len(g.Request.FileToGenerate) == 0 {
+		g.Fail("no files to generate")
+	}
+
+	g.CommandLineParameters(g.Request.GetParameter())
+
+	// Create a wrapped version of the Descriptors and EnumDescriptors that
+	// point to the file that defines them.
+	g.WrapTypes()
+
+	g.SetPackageNames()
+	g.BuildTypeNameMap()
+
+	g.GenerateAllFiles()
+
+	// Send back the results.
+	data, err = proto.Marshal(g.Response)
+	if err != nil {
+		g.Error(err, "failed to marshal output proto")
+	}
+	_, err = os.Stdout.Write(data)
+	if err != nil {
+		g.Error(err, "failed to write output proto")
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
new file mode 100644
index 0000000..8953d0f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/compiler/plugin.proto
+// DO NOT EDIT!
+
+package google_protobuf_compiler
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference proto and math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+type CodeGeneratorRequest struct {
+	FileToGenerate   []string                               `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
+	Parameter        *string                                `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
+	XXX_unrecognized []byte                                 `json:"-"`
+}
+
+func (this *CodeGeneratorRequest) Reset()         { *this = CodeGeneratorRequest{} }
+func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorRequest) ProtoMessage()       {}
+
+func (this *CodeGeneratorRequest) GetParameter() string {
+	if this != nil && this.Parameter != nil {
+		return *this.Parameter
+	}
+	return ""
+}
+
+type CodeGeneratorResponse struct {
+	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (this *CodeGeneratorResponse) Reset()         { *this = CodeGeneratorResponse{} }
+func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse) GetError() string {
+	if this != nil && this.Error != nil {
+		return *this.Error
+	}
+	return ""
+}
+
+type CodeGeneratorResponse_File struct {
+	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	InsertionPoint   *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
+	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (this *CodeGeneratorResponse_File) Reset()         { *this = CodeGeneratorResponse_File{} }
+func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse_File) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse_File) GetName() string {
+	if this != nil && this.Name != nil {
+		return *this.Name
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if this != nil && this.InsertionPoint != nil {
+		return *this.InsertionPoint
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetContent() string {
+	if this != nil && this.Content != nil {
+		return *this.Content
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
new file mode 100644
index 0000000..5b55745
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING:  The plugin interface is currently EXPERIMENTAL and is subject to
+//   change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins.  A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path.  The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+  optional int32 major = 1;
+  optional int32 minor = 2;
+  optional int32 patch = 3;
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+  // be empty for mainline stable releases.
+  optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+  // The .proto files that were explicitly listed on the command-line.  The
+  // code generator should generate code only for these files.  Each file's
+  // descriptor will be included in proto_file, below.
+  repeated string file_to_generate = 1;
+
+  // The generator parameter passed on the command-line.
+  optional string parameter = 2;
+
+  // FileDescriptorProtos for all files in file_to_generate and everything
+  // they import.  The files will appear in topological order, so each file
+  // appears before any file that imports it.
+  //
+  // protoc guarantees that all proto_files will be written after
+  // the fields above, even though this is not technically guaranteed by the
+  // protobuf wire format.  This theoretically could allow a plugin to stream
+  // in the FileDescriptorProtos and handle them one by one rather than read
+  // the entire set into memory at once.  However, as of this writing, this
+  // is not similarly optimized on protoc's end -- it will store all fields in
+  // memory at once before sending them to the plugin.
+  //
+  // Type names of fields and extensions in the FileDescriptorProto are always
+  // fully qualified.
+  repeated FileDescriptorProto proto_file = 15;
+
+  // The version number of protocol compiler.
+  optional Version compiler_version = 3;
+
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+  // Error message.  If non-empty, code generation failed.  The plugin process
+  // should exit with status code zero even if it reports an error in this way.
+  //
+  // This should be used to indicate errors in .proto files which prevent the
+  // code generator from generating correct code.  Errors which indicate a
+  // problem in protoc itself -- such as the input CodeGeneratorRequest being
+  // unparseable -- should be reported by writing a message to stderr and
+  // exiting with a non-zero status code.
+  optional string error = 1;
+
+  // Represents a single generated file.
+  message File {
+    // The file name, relative to the output directory.  The name must not
+    // contain "." or ".." components and must be relative, not absolute (so,
+    // the file cannot lie outside the output directory).  "/" must be used as
+    // the path separator, not "\".
+    //
+    // If the name is omitted, the content will be appended to the previous
+    // file.  This allows the generator to break large files into small chunks,
+    // and allows the generated text to be streamed back to protoc so that large
+    // files need not reside completely in memory at one time.  Note that as of
+    // this writing protoc does not optimize for this -- it will read the entire
+    // CodeGeneratorResponse before writing files to disk.
+    optional string name = 1;
+
+    // If non-empty, indicates that the named file should already exist, and the
+    // content here is to be inserted into that file at a defined insertion
+    // point.  This feature allows a code generator to extend the output
+    // produced by another code generator.  The original generator may provide
+    // insertion points by placing special annotations in the file that look
+    // like:
+    //   @@protoc_insertion_point(NAME)
+    // The annotation can have arbitrary text before and after it on the line,
+    // which allows it to be placed in a comment.  NAME should be replaced with
+    // an identifier naming the point -- this is what other generators will use
+    // as the insertion_point.  Code inserted at this point will be placed
+    // immediately above the line containing the insertion point (thus multiple
+    // insertions to the same point will come out in the order they were added).
+    // The double-@ is intended to make it unlikely that the generated code
+    // could contain things that look like insertion points by accident.
+    //
+    // For example, the C++ code generator places the following line in the
+    // .pb.h files that it generates:
+    //   // @@protoc_insertion_point(namespace_scope)
+    // This line appears within the scope of the file's package namespace, but
+    // outside of any particular class.  Another plugin can then specify the
+    // insertion_point "namespace_scope" to generate additional classes or
+    // other declarations that should be placed in this scope.
+    //
+    // Note that if the line containing the insertion point begins with
+    // whitespace, the same whitespace will be added to every line of the
+    // inserted text.  This is useful for languages like Python, where
+    // indentation matters.  In these languages, the insertion point comment
+    // should be indented the same amount as any inserted code will need to be
+    // in order to work correctly in that context.
+    //
+    // The code generator that generates the initial file and the one which
+    // inserts into it must both run as part of a single invocation of protoc.
+    // Code generators are executed in the order in which they appear on the
+    // command line.
+    //
+    // If |insertion_point| is present, |name| must also be present.
+    optional string insertion_point = 2;
+
+    // The file contents.
+    optional string content = 15;
+  }
+  repeated File file = 15;
+}
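
For reference, a minimal sketch of the stdin/stdout protocol described in the comments above — this is illustrative only, not part of the vendored sources; the plugin name `protoc-gen-hello` and its output file naming are hypothetical:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc hands the plugin an encoded CodeGeneratorRequest on stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		// Problems with the request itself go to stderr with a non-zero exit.
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Emit one generated file per explicitly listed .proto file.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.FileToGenerate {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".hello.txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// Errors in the .proto input would instead set resp.Error and still exit
	// with status zero. The encoded CodeGeneratorResponse goes back on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	os.Stdout.Write(out)
}
```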
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..70276e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding message
+// type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the
+// type of the contents of the Any message does not match the type of the pb
+// message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *any.Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
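
A minimal round-trip sketch for the helpers above (illustrative, not vendored code), using durpb.Duration as a stand-in for any message type linked into the binary:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := &durpb.Duration{Seconds: 3}

	// TypeUrl becomes "type.googleapis.com/google.protobuf.Duration".
	a, err := ptypes.MarshalAny(src)
	if err != nil {
		panic(err)
	}

	// Is checks the contained type without unmarshaling the payload.
	dst := &durpb.Duration{}
	if ptypes.Is(a, dst) {
		if err := ptypes.UnmarshalAny(a, dst); err != nil {
			panic(err)
		}
	}
	fmt.Println(dst.Seconds) // 3
}
```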
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..c748667
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,149 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by the protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name whose content describes the type of the
+  // serialized protocol buffer message.
+  //
+  // For URLs which use the scheme `http`, `https`, or no scheme, the
+  // following restrictions and interpretations apply:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * The last segment of the URL's path must represent the fully
+  //   qualified name of the type (as in `path/google.protobuf.Duration`).
+  //   The name should be in a canonical form (e.g., leading "." is
+  //   not accepted).
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..65cb0f8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+	// Range of a durpb.Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durpb.Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
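
A small sketch of the conversion pair above (illustrative, not vendored code): time.Duration to durpb.Duration and back.

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// 90.5s splits into Seconds=90, Nanos=500000000 with matching signs.
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// The reverse conversion validates range and sign agreement first.
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30.5s
}
```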
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
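
A Go transcription of Example 1 above (a sketch, not part of the generated package): subtract two Timestamps and normalize the result so `seconds` and `nanos` agree in sign.

```go
package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func between(start, end *tspb.Timestamp) *durpb.Duration {
	d := &durpb.Duration{
		Seconds: end.Seconds - start.Seconds,
		Nanos:   end.Nanos - start.Nanos,
	}
	// Normalize so both fields carry the same sign.
	if d.Seconds < 0 && d.Nanos > 0 {
		d.Seconds++
		d.Nanos -= 1000000000
	} else if d.Seconds > 0 && d.Nanos < 0 {
		d.Seconds--
		d.Nanos += 1000000000
	}
	return d
}

func main() {
	start := &tspb.Timestamp{Seconds: 10, Nanos: 900000000}
	end := &tspb.Timestamp{Seconds: 12, Nanos: 100000000}
	fmt.Println(between(start, end)) // seconds:1 nanos:200000000
}
```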
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is empty JSON object `{}`.
+message Empty {}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..47f10db
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,134 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &tspb.Timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
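
A small sketch of the conversions above (illustrative, not vendored code), round-tripping a fixed time.Time through a Timestamp:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	when := time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC)

	ts, err := ptypes.TimestampProto(when)
	if err != nil {
		panic(err)
	}
	fmt.Println(ptypes.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

	// Back to time.Time, always in the UTC locale.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Equal(when)) // true
}
```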
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..06750ab
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,133 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
+// to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's benchmark suite, run as:
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
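+
+
+
+Example usage (a minimal sketch of the block-format Encode/Decode API defined
+in encode.go and decode.go):
+
+encoded := snappy.Encode(nil, []byte("hello, snappy"))
+decoded, err := snappy.Decode(nil, encoded) // err is ErrCorrupt on bad input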
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
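+	// wordSize is the platform word width in bits: ^uint(0)>>32 is 1 when
+	// uint is 64 bits wide and 0 when it is 32, so this is 64 or 32.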
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
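+//
+// For example, Decode(nil, encoded) allocates and returns a new slice of
+// exactly DecodedLen(encoded) bytes holding the decoded block.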
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
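+		// buf must be able to hold the checksum plus the largest possible
+		// compressed chunk body for a maxBlockSize block.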
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
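+		// Each chunk starts with a 4-byte header: a 1-byte chunk type and a
+		// little-endian uint24 length of the chunk body.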
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4. Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load/stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
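+			// A tagCopy1 op is 2 bytes: bits 2-4 of src[s-2] hold length-4,
+			// bits 5-7 hold the high 3 bits of the 11-bit offset, and
+			// src[s-1] holds the low 8 offset bits.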
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+		// the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		for end := d + length; d != end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
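+//
+// For example, Encode(nil, data) allocates a MaxEncodedLen(len(data))-byte
+// buffer and returns the encoded prefix of it.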
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
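+	// For example, a 65536-byte (maxBlockSize) input yields
+	// 32 + 65536 + 10922 = 76490.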
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
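+		// len(uncompressed)/8 is the 12.5% threshold mentioned above; when
+		// compression saves less than that, emit the chunk uncompressed.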
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
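+//
+// For example, a 100-byte literal is encoded as the tag byte 60<<2|tagLiteral,
+// a length byte of 99, and then the 100 literal bytes, returning 102.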
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
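+//
+// For example, emitCopy(dst, 8, 67) emits a length-60 tagCopy2 (3 bytes) and
+// then a length-7 tagCopy1 (2 bytes), returning 5.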
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
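+
+// Worked example (illustrative): for a length 67 copy with a small offset,
+// the loop above does not fire (67 < 68), the "length > 64" branch emits a
+// length 60 tagCopy2 (3 bytes), and the remaining length 7 copy is emitted
+// as a 2-byte tagCopy1, for 5 bytes total -- versus 6 bytes for a length 64
+// tagCopy2 followed by a length 3 tagCopy2, which is exactly the trade-off
+// the comment above describes.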
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
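+
+// Illustrative sketch of the skip heuristic above (not part of the upstream
+// file): with skip starting at 32, skip>>5 yields a step of 1 for the first
+// 32 probes, 2 for the next 16, 3 after that, and so on, so the scan
+// accelerates smoothly through incompressible input:
+//
+//	step := 0
+//	for skip := 32; skip < 128; skip += step {
+//		step = skip >> 5 // 1 while skip < 64, then 2, then 3, ...
+//	}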
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
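+
+// Illustrative sketch (not part of the upstream file): splitting a chunk's
+// first byte into the 2-bit tag l and the 6-bit payload m described above.
+//
+//	l := first & 0x03 // tagLiteral, tagCopy1, tagCopy2 or tagCopy4
+//	m := first >> 2   // ranges in [0, 64)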
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
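+
+// Illustrative sketch (an assumption about usage, not code from this file):
+// per the framing format, this masked checksum is stored as a little-endian
+// uint32 at the start of each data chunk's body (using encoding/binary).
+//
+//	var sum [4]byte
+//	binary.LittleEndian.PutUint32(sum[:], crc(uncompressed))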
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
new file mode 100644
index 0000000..d8156a6
--- /dev/null
+++ b/vendor/github.com/google/uuid/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.4.3
+  - 1.5.3
+  - tip
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..04fdf09
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman <borman@google.com>
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..9d92c11
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,19 @@
+# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services. 
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid).  It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice.  One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation 
+[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here: 
+http://godoc.org/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group.  The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID.  Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array.  UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod
new file mode 100644
index 0000000..fc84cd7
--- /dev/null
+++ b/vendor/github.com/google/uuid/go.mod
@@ -0,0 +1 @@
+module github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..b174616
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h.  The hash should be at least 16 bytes in length.  The
+// first 16 bytes of the hash are used to form the UUID.  The version of the
+// UUID will be the lower 4 bits of version.  NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:])
+	h.Write(data)
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
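+
+// Illustrative sketch (not part of the upstream file): name-based UUIDs are
+// deterministic, so hashing the same name in the same namespace always
+// yields the same UUID.
+//
+//	u1 := NewSHA1(NameSpaceDNS, []byte("example.com"))
+//	u2 := NewSHA1(NameSpaceDNS, []byte("example.com"))
+//	// u1 == u2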
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..7f9e0c6
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err == nil {
+		*uuid = id
+	}
+	return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}
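+
+// Illustrative sketch (an assumption, not code from this file): because UUID
+// implements encoding.TextMarshaler and encoding.TextUnmarshaler, a UUID
+// field round-trips through encoding/json as its canonical string form.
+//
+//	type record struct {
+//		ID UUID `json:"id"`
+//	}
+//	b, _ := json.Marshal(record{ID: New()})
+//	// {"id":"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"}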
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..3e4e90d
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,89 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"sync"
+)
+
+var (
+	nodeMu sync.Mutex
+	ifname string  // name of interface being used
+	nodeID [6]byte // hardware for version 1 UUIDs
+	zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived.  The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated.  If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
+	}
+
+	// We found no interfaces with a valid hardware address.  If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
+// of id are used.  If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid.  It returns nil if uuid is
+// not valid.  The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..24b78ed
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces are returned.  If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil {
+			return "", nil
+		}
+	}
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			return ifs.Name, ifs.HardwareAddr
+		}
+	}
+	return "", nil
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..f326b54
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
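+
+// Illustrative sketch (the query, schema, and placeholder syntax are
+// hypothetical and driver-dependent): Scan and Value let a UUID be used
+// directly with database/sql.
+//
+//	var id UUID
+//	err := db.QueryRow("SELECT id FROM items LIMIT 1").Scan(&id)
+//	_, err = db.Exec("INSERT INTO items (id) VALUES (?)", New())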
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..e6ef06c
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed.  An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set.  The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated.  Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.  Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	oldSeq := clockSeq
+	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if oldSeq != clockSeq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid.  The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
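+
+// Illustrative sketch (not part of the upstream file): recovering a standard
+// time.Time from a Version 1 UUID's embedded timestamp.
+//
+//	sec, nsec := u.Time().UnixTime()
+//	t := time.Unix(sec, nsec)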
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+	b1 := xvalues[x1]
+	b2 := xvalues[x2]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..524404c
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,245 @@
+// Copyright 2018 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// Parse decodes s into a UUID or returns an error.  Both the standard UUID
+// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
+// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
+// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+func Parse(s string) (UUID, error) {
+	var uuid UUID
+	switch len(s) {
+	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36:
+
+	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9:
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+	case 36 + 2:
+		s = s[1:]
+
+	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+	case 32:
+		var ok bool
+		for i := range uuid {
+			uuid[i], ok = xtob(s[i*2], s[i*2+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(s[x], s[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
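+
+// Illustrative examples (the hex digits are arbitrary): all four accepted
+// encodings of the same UUID decode identically.
+//
+//	Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
+//	Parse("urn:uuid:f47ac10b-58cc-0372-8567-0e02b2c3d479")
+//	Parse("{f47ac10b-58cc-0372-8567-0e02b2c3d479}")
+//	Parse("f47ac10b58cc037285670e02b2c3d479")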
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+	var uuid UUID
+	switch len(b) {
+	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+		}
+		b = b[9:]
+	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+		b = b[1:]
+	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+		var ok bool
+		for i := 0; i < 32; i += 2 {
+			uuid[i/2], ok = xtob(b[i], b[i+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(b[x], b[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+	uuid, err := Parse(s)
+	if err != nil {
+		panic(`uuid: Parse(` + s + `): ` + err.Error())
+	}
+	return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+	err = uuid.UnmarshalBinary(b)
+	return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,  or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst, uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
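+
+// Illustrative sketch (an assumption, not code from this file): injecting a
+// deterministic source gives reproducible UUIDs in tests. A seeded
+// math/rand.Rand implements io.Reader, so it can stand in for crypto/rand
+// (mathrand here is a hypothetical import alias for math/rand).
+//
+//	SetRand(mathrand.New(mathrand.NewSource(1)))
+//	defer SetRand(nil) // restore the default crypto/rand generator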
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..199a1ac
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time.  If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically.  If the NodeID cannot
+// be set NewUUID returns nil.  If clock sequence has not been set by
+// SetClockSequence then it will be set automatically.  If GetTime fails to
+// return the current time, NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nodeMu.Unlock()
+
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+	copy(uuid[10:], nodeID[:])
+
+	return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..84af91c
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics.  New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, which
+//  means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
diff --git a/vendor/github.com/gyuho/goraph/.travis.yml b/vendor/github.com/gyuho/goraph/.travis.yml
new file mode 100644
index 0000000..4228e24
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+sudo: false
+
+go:
+- 1.6
+- tip
+
+script:
+- ./test
+
diff --git a/vendor/github.com/gyuho/goraph/LICENSE b/vendor/github.com/gyuho/goraph/LICENSE
new file mode 100644
index 0000000..f7303ba
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Gyu-Ho Lee
+Copyright (c) 2016 Google Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/gyuho/goraph/README.md b/vendor/github.com/gyuho/goraph/README.md
new file mode 100644
index 0000000..0b5b590
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/README.md
@@ -0,0 +1,32 @@
+## goraph [![Build Status](https://img.shields.io/travis/gyuho/goraph.svg?style=flat-square)](https://travis-ci.org/gyuho/goraph) [![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/gyuho/goraph)
+
+Package goraph implements graph data structure and algorithms.
+
+```
+go get -v gopkg.in/gyuho/goraph.v2;
+```
+
+<br>
+I have tutorials and visualizations of graph and tree algorithms:
+
+- [**_Binary search tree_**](https://github.com/gyuho/learn/tree/master/doc/binary_search_tree)
+- [**_Go: heap, priority queue_**](https://github.com/gyuho/learn/tree/master/doc/go_heap_priority_queue)
+- [**_Go: red black tree_**](https://github.com/gyuho/learn/tree/master/doc/go_red_black_tree)
+- [**_Go: b-tree_**](https://github.com/gyuho/learn/tree/master/doc/go_b_tree)
+- [**_Go: graph, interface_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_interface)
+- [**_Go: graph, traversal_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_traversal)
+- [**_Go: graph, shortest path_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_shortest_path)
+- [**_Go: graph, topological sort_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_topological_sort)
+- [**_Go: graph, minimum spanning tree_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_minimum_spanning_tree)
+- [**_Go: graph, strongly connected components_**](https://github.com/gyuho/learn/tree/master/doc/go_graph_strongly_connected_components)
+
+<br>
+For fast query and retrieval, please check out  <a href="http://google-opensource.blogspot.co.uk/2014/06/cayley-graphs-in-go.html" target="_blank">Cayley</a>.
+
+
+<br>
+<a href="http://www.youtube.com/watch?v=ImMnYq2zP4Y" target="_blank"><img src="http://img.youtube.com/vi/ImMnYq2zP4Y/0.jpg"></a>
+
+- <a href="https://www.youtube.com/channel/UCWzSgIp_DYRQnEsJuH32Fww" target="_blank">Please visit my YouTube Channel</a>
+- <a href="https://www.youtube.com/watch?v=NdfIfxTsVDo&list=PLT6aABhFfinvsSn1H195JLuHaXNS6UVhf" target="_blank">`Tree`, `Graph` Theory Algorithms (Playlist)</a>
+- <a href="https://www.youtube.com/watch?v=ImMnYq2zP4Y&list=PLT6aABhFfinvsSn1H195JLuHaXNS6UVhf&index=4" target="_blank">`Graph` : BFS, DFS</a>
diff --git a/vendor/github.com/gyuho/goraph/disjoint_set.go b/vendor/github.com/gyuho/goraph/disjoint_set.go
new file mode 100644
index 0000000..3a8085f
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/disjoint_set.go
@@ -0,0 +1,67 @@
+package goraph
+
+import "sync"
+
+// DisjointSet implements disjoint set.
+// (https://en.wikipedia.org/wiki/Disjoint-set_data_structure)
+type DisjointSet struct {
+	represent string
+	members   map[string]struct{}
+}
+
+// Forests is a set of DisjointSet.
+type Forests struct {
+	mu   sync.Mutex // guards the following
+	data map[*DisjointSet]struct{}
+}
+
+// NewForests creates a new Forests.
+func NewForests() *Forests {
+	set := &Forests{}
+	set.data = make(map[*DisjointSet]struct{})
+	return set
+}
+
+// MakeDisjointSet creates a DisjointSet.
+func MakeDisjointSet(forests *Forests, name string) {
+	newDS := &DisjointSet{}
+	newDS.represent = name
+	members := make(map[string]struct{})
+	members[name] = struct{}{}
+	newDS.members = members
+	forests.mu.Lock()
+	defer forests.mu.Unlock()
+	forests.data[newDS] = struct{}{}
+}
+
+// FindSet returns the DisjointSet with the represent name.
+func FindSet(forests *Forests, name string) *DisjointSet {
+	forests.mu.Lock()
+	defer forests.mu.Unlock()
+	for data := range forests.data {
+		if data.represent == name {
+			return data
+		}
+		for k := range data.members {
+			if k == name {
+				return data
+			}
+		}
+	}
+	return nil
+}
+
+// Union unions two DisjointSet, with ds1's represent.
+func Union(forests *Forests, ds1, ds2 *DisjointSet) {
+	newDS := &DisjointSet{}
+	newDS.represent = ds1.represent
+	newDS.members = ds1.members
+	for k := range ds2.members {
+		newDS.members[k] = struct{}{}
+	}
+	forests.mu.Lock()
+	defer forests.mu.Unlock()
+	forests.data[newDS] = struct{}{}
+	delete(forests.data, ds1)
+	delete(forests.data, ds2)
+}
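+
+// Illustrative sketch (not part of the upstream file): the three operations
+// above compose into the usual union-find workflow.
+//
+//	f := NewForests()
+//	MakeDisjointSet(f, "A")
+//	MakeDisjointSet(f, "B")
+//	Union(f, FindSet(f, "A"), FindSet(f, "B"))
+//	// FindSet(f, "B") now returns the merged set, represented by "A".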
diff --git a/vendor/github.com/gyuho/goraph/doc.go b/vendor/github.com/gyuho/goraph/doc.go
new file mode 100644
index 0000000..191d299
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/doc.go
@@ -0,0 +1,2 @@
+// Package goraph implements graph data structure and algorithms.
+package goraph // import "github.com/gyuho/goraph"
diff --git a/vendor/github.com/gyuho/goraph/graph.go b/vendor/github.com/gyuho/goraph/graph.go
new file mode 100644
index 0000000..87f87c5
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/graph.go
@@ -0,0 +1,503 @@
+package goraph
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sync"
+)
+
+// ID is unique identifier.
+type ID interface {
+	// String returns the string ID.
+	String() string
+}
+
+type StringID string
+
+func (s StringID) String() string {
+	return string(s)
+}
+
+// Node is vertex. The ID must be unique within the graph.
+type Node interface {
+	// ID returns the ID.
+	ID() ID
+	String() string
+}
+
+type node struct {
+	id string
+}
+
+var nodeCnt uint64
+
+func NewNode(id string) Node {
+	return &node{
+		id: id,
+	}
+}
+
+func (n *node) ID() ID {
+	return StringID(n.id)
+}
+
+func (n *node) String() string {
+	return n.id
+}
+
+// Edge connects between two Nodes.
+type Edge interface {
+	Source() Node
+	Target() Node
+	Weight() float64
+	String() string
+}
+
+// edge is an Edge from Source to Target.
+type edge struct {
+	src Node
+	tgt Node
+	wgt float64
+}
+
+func NewEdge(src, tgt Node, wgt float64) Edge {
+	return &edge{
+		src: src,
+		tgt: tgt,
+		wgt: wgt,
+	}
+}
+
+func (e *edge) Source() Node {
+	return e.src
+}
+
+func (e *edge) Target() Node {
+	return e.tgt
+}
+
+func (e *edge) Weight() float64 {
+	return e.wgt
+}
+
+func (e *edge) String() string {
+	return fmt.Sprintf("%s -- %.3f -→ %s\n", e.src, e.wgt, e.tgt)
+}
+
+type EdgeSlice []Edge
+
+func (e EdgeSlice) Len() int           { return len(e) }
+func (e EdgeSlice) Less(i, j int) bool { return e[i].Weight() < e[j].Weight() }
+func (e EdgeSlice) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+
+// Graph describes the methods of graph operations.
+// It assumes that the identifier of a Node is unique,
+// and that edge weights are float64 values.
+type Graph interface {
+	// Init initializes a Graph.
+	Init()
+
+	// GetNodeCount returns the total number of nodes.
+	GetNodeCount() int
+
+	// GetNode finds the Node. It returns nil if the Node
+	// does not exist in the graph.
+	GetNode(id ID) Node
+
+	// GetNodes returns a map from node ID to
+	// empty struct value. Graph does not allow duplicate
+	// node ID or name.
+	GetNodes() map[ID]Node
+
+	// AddNode adds a node to a graph, and returns false
+	// if the node already existed in the graph.
+	AddNode(nd Node) bool
+
+	// DeleteNode deletes a node from a graph.
+	// It returns true if it got deleted.
+	// And false if it didn't get deleted.
+	DeleteNode(id ID) bool
+
+	// AddEdge adds an edge from nd1 to nd2 with the weight.
+	// It returns error if a node does not exist.
+	AddEdge(id1, id2 ID, weight float64) error
+
+	// ReplaceEdge replaces an edge from id1 to id2 with the weight.
+	ReplaceEdge(id1, id2 ID, weight float64) error
+
+	// DeleteEdge deletes an edge from id1 to id2.
+	DeleteEdge(id1, id2 ID) error
+
+	// GetWeight returns the weight from id1 to id2.
+	GetWeight(id1, id2 ID) (float64, error)
+
+	// GetSources returns the map of parent Nodes.
+	// (Nodes that come towards the argument vertex.)
+	GetSources(id ID) (map[ID]Node, error)
+
+	// GetTargets returns the map of child Nodes.
+	// (Nodes that go out of the argument vertex.)
+	GetTargets(id ID) (map[ID]Node, error)
+
+	// String describes the Graph.
+	String() string
+}
+
+// graph is an internal default graph type that
+// implements all methods in Graph interface.
+type graph struct {
+	mu sync.RWMutex // guards the following
+
+	// idToNodes stores all nodes.
+	idToNodes map[ID]Node
+
+	// nodeToSources maps a Node identifier to sources (parents) with edge weights.
+	nodeToSources map[ID]map[ID]float64
+
+	// nodeToTargets maps a Node identifier to targets (children) with edge weights.
+	nodeToTargets map[ID]map[ID]float64
+}
+
+// newGraph returns a new graph.
+func newGraph() *graph {
+	return &graph{
+		idToNodes:     make(map[ID]Node),
+		nodeToSources: make(map[ID]map[ID]float64),
+		nodeToTargets: make(map[ID]map[ID]float64),
+		//
+		// without this
+		// panic: assignment to entry in nil map
+	}
+}
+
+// NewGraph returns a new graph.
+func NewGraph() Graph {
+	return newGraph()
+}
+
+func (g *graph) Init() {
+	// (X) g = newGraph()
+	// this only updates the pointer
+	//
+	//
+	// (X) *g = *newGraph()
+	// assignment copies lock value
+
+	g.idToNodes = make(map[ID]Node)
+	g.nodeToSources = make(map[ID]map[ID]float64)
+	g.nodeToTargets = make(map[ID]map[ID]float64)
+}
+
+func (g *graph) GetNodeCount() int {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	return len(g.idToNodes)
+}
+
+func (g *graph) GetNode(id ID) Node {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	return g.idToNodes[id]
+}
+
+func (g *graph) GetNodes() map[ID]Node {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	return g.idToNodes
+}
+
+func (g *graph) unsafeExistID(id ID) bool {
+	_, ok := g.idToNodes[id]
+	return ok
+}
+
+func (g *graph) AddNode(nd Node) bool {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if g.unsafeExistID(nd.ID()) {
+		return false
+	}
+
+	id := nd.ID()
+	g.idToNodes[id] = nd
+	return true
+}
+
+func (g *graph) DeleteNode(id ID) bool {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id) {
+		return false
+	}
+
+	delete(g.idToNodes, id)
+
+	delete(g.nodeToTargets, id)
+	for _, smap := range g.nodeToTargets {
+		delete(smap, id)
+	}
+
+	delete(g.nodeToSources, id)
+	for _, smap := range g.nodeToSources {
+		delete(smap, id)
+	}
+
+	return true
+}
+
+func (g *graph) AddEdge(id1, id2 ID, weight float64) error {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id1) {
+		return fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		if v, ok2 := g.nodeToTargets[id1][id2]; ok2 {
+			g.nodeToTargets[id1][id2] = v + weight
+		} else {
+			g.nodeToTargets[id1][id2] = weight
+		}
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id2] = weight
+		g.nodeToTargets[id1] = tmap
+	}
+	if _, ok := g.nodeToSources[id2]; ok {
+		if v, ok2 := g.nodeToSources[id2][id1]; ok2 {
+			g.nodeToSources[id2][id1] = v + weight
+		} else {
+			g.nodeToSources[id2][id1] = weight
+		}
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id1] = weight
+		g.nodeToSources[id2] = tmap
+	}
+
+	return nil
+}
+
+func (g *graph) ReplaceEdge(id1, id2 ID, weight float64) error {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id1) {
+		return fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		g.nodeToTargets[id1][id2] = weight
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id2] = weight
+		g.nodeToTargets[id1] = tmap
+	}
+	if _, ok := g.nodeToSources[id2]; ok {
+		g.nodeToSources[id2][id1] = weight
+	} else {
+		tmap := make(map[ID]float64)
+		tmap[id1] = weight
+		g.nodeToSources[id2] = tmap
+	}
+	return nil
+}
+
+func (g *graph) DeleteEdge(id1, id2 ID) error {
+	g.mu.Lock()
+	defer g.mu.Unlock()
+
+	if !g.unsafeExistID(id1) {
+		return fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		if _, ok := g.nodeToTargets[id1][id2]; ok {
+			delete(g.nodeToTargets[id1], id2)
+		}
+	}
+	if _, ok := g.nodeToSources[id2]; ok {
+		if _, ok := g.nodeToSources[id2][id1]; ok {
+			delete(g.nodeToSources[id2], id1)
+		}
+	}
+	return nil
+}
+
+func (g *graph) GetWeight(id1, id2 ID) (float64, error) {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if !g.unsafeExistID(id1) {
+		return 0, fmt.Errorf("%s does not exist in the graph.", id1)
+	}
+	if !g.unsafeExistID(id2) {
+		return 0, fmt.Errorf("%s does not exist in the graph.", id2)
+	}
+
+	if _, ok := g.nodeToTargets[id1]; ok {
+		if v, ok := g.nodeToTargets[id1][id2]; ok {
+			return v, nil
+		}
+	}
+	return 0.0, fmt.Errorf("there is no edge from %s to %s", id1, id2)
+}
+
+func (g *graph) GetSources(id ID) (map[ID]Node, error) {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if !g.unsafeExistID(id) {
+		return nil, fmt.Errorf("%s does not exist in the graph.", id)
+	}
+
+	rs := make(map[ID]Node)
+	if _, ok := g.nodeToSources[id]; ok {
+		for n := range g.nodeToSources[id] {
+			rs[n] = g.idToNodes[n]
+		}
+	}
+	return rs, nil
+}
+
+func (g *graph) GetTargets(id ID) (map[ID]Node, error) {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	if !g.unsafeExistID(id) {
+		return nil, fmt.Errorf("%s does not exist in the graph.", id)
+	}
+
+	rs := make(map[ID]Node)
+	if _, ok := g.nodeToTargets[id]; ok {
+		for n := range g.nodeToTargets[id] {
+			rs[n] = g.idToNodes[n]
+		}
+	}
+	return rs, nil
+}
+
+func (g *graph) String() string {
+	g.mu.RLock()
+	defer g.mu.RUnlock()
+
+	buf := new(bytes.Buffer)
+	for id1, nd1 := range g.idToNodes {
+		// read the target map directly; calling GetTargets/GetWeight here
+		// would re-acquire the read lock and can deadlock once a writer
+		// is queued between the two acquisitions
+		for id2, weight := range g.nodeToTargets[id1] {
+			fmt.Fprintf(buf, "%s -- %.3f -→ %s\n", nd1, weight, g.idToNodes[id2])
+		}
+	}
+	return buf.String()
+}
+
+// NewGraphFromJSON returns a new Graph from a JSON file.
+// Here's the sample JSON data:
+//
+//	{
+//	    "graph_00": {
+//	        "S": {
+//	            "A": 100,
+//	            "B": 14,
+//	            "C": 200
+//	        },
+//	        "A": {
+//	            "S": 15,
+//	            "B": 5,
+//	            "D": 20,
+//	            "T": 44
+//	        },
+//	        "B": {
+//	            "S": 14,
+//	            "A": 5,
+//	            "D": 30,
+//	            "E": 18
+//	        },
+//	        "C": {
+//	            "S": 9,
+//	            "E": 24
+//	        },
+//	        "D": {
+//	            "A": 20,
+//	            "B": 30,
+//	            "E": 2,
+//	            "F": 11,
+//	            "T": 16
+//	        },
+//	        "E": {
+//	            "B": 18,
+//	            "C": 24,
+//	            "D": 2,
+//	            "F": 6,
+//	            "T": 19
+//	        },
+//	        "F": {
+//	            "D": 11,
+//	            "E": 6,
+//	            "T": 6
+//	        },
+//	        "T": {
+//	            "A": 44,
+//	            "D": 16,
+//	            "F": 6,
+//	            "E": 19
+//	        }
+//	    }
+//	}
+//
+func NewGraphFromJSON(rd io.Reader, graphID string) (Graph, error) {
+	js := make(map[string]map[string]map[string]float64)
+	dec := json.NewDecoder(rd)
+	for {
+		if err := dec.Decode(&js); err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+	}
+	if _, ok := js[graphID]; !ok {
+		return nil, fmt.Errorf("%s does not exist", graphID)
+	}
+	gmap := js[graphID]
+
+	g := newGraph()
+	for id1, mm := range gmap {
+		nd1 := g.GetNode(StringID(id1))
+		if nd1 == nil {
+			nd1 = NewNode(id1)
+			if ok := g.AddNode(nd1); !ok {
+				return nil, fmt.Errorf("%s already exists", nd1)
+			}
+		}
+		for id2, weight := range mm {
+			nd2 := g.GetNode(StringID(id2))
+			if nd2 == nil {
+				nd2 = NewNode(id2)
+				if ok := g.AddNode(nd2); !ok {
+					return nil, fmt.Errorf("%s already exists", nd2)
+				}
+			}
+			g.ReplaceEdge(nd1.ID(), nd2.ID(), weight)
+		}
+	}
+
+	return g, nil
+}
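+
+// A minimal usage sketch for NewGraphFromJSON; the file name and graph ID
+// below are illustrative only:
+//
+//	f, err := os.Open("graph.json")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer f.Close()
+//
+//	g, err := NewGraphFromJSON(f, "graph_00")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(g.GetNodeCount())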
diff --git a/vendor/github.com/gyuho/goraph/minimum_spanning_tree.go b/vendor/github.com/gyuho/goraph/minimum_spanning_tree.go
new file mode 100644
index 0000000..a86e279
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/minimum_spanning_tree.go
@@ -0,0 +1,287 @@
+package goraph
+
+import (
+	"container/heap"
+	"math"
+	"sort"
+)
+
+// Kruskal finds the minimum spanning tree with a disjoint-set data structure.
+// (http://en.wikipedia.org/wiki/Kruskal%27s_algorithm)
+//
+//	 0. Kruskal(G)
+//	 1.
+//	 2. 	A = ∅
+//	 3.
+//	 4. 	for each vertex v in G:
+//	 5. 		MakeDisjointSet(v)
+//	 6.
+//	 7. 	edges = get all edges
+//	 8. 	sort edges in ascending order of weight
+//	 9.
+//	10. 	for each edge (u, v) in edges:
+//	11. 		if FindSet(u) ≠ FindSet(v):
+//	12. 			A = A ∪ {(u, v)}
+//	13. 			Union(u, v)
+//	14.
+//	15. 	return A
+//
+func Kruskal(g Graph) (map[Edge]struct{}, error) {
+
+	// A = ∅
+	A := make(map[Edge]struct{})
+
+	// forests is a disjoint-set data structure that maps
+	// each member Node to the representative of its set.
+	// (https://en.wikipedia.org/wiki/Disjoint-set_data_structure)
+	forests := NewForests()
+
+	// for each vertex v in G:
+	for _, nd := range g.GetNodes() {
+		// MakeDisjointSet(v)
+		MakeDisjointSet(forests, nd.String())
+	}
+
+	// edges = get all edges
+	edges := []Edge{}
+	foundEdge := make(map[string]struct{})
+	for id1, nd1 := range g.GetNodes() {
+		tm, err := g.GetTargets(id1)
+		if err != nil {
+			return nil, err
+		}
+		for id2, nd2 := range tm {
+			weight, err := g.GetWeight(id1, id2)
+			if err != nil {
+				return nil, err
+			}
+			edge := NewEdge(nd1, nd2, weight)
+			if _, ok := foundEdge[edge.String()]; !ok {
+				edges = append(edges, edge)
+				foundEdge[edge.String()] = struct{}{}
+			}
+		}
+
+		sm, err := g.GetSources(id1)
+		if err != nil {
+			return nil, err
+		}
+		for id3, nd3 := range sm {
+			weight, err := g.GetWeight(id3, id1)
+			if err != nil {
+				return nil, err
+			}
+			edge := NewEdge(nd3, nd1, weight)
+			if _, ok := foundEdge[edge.String()]; !ok {
+				edges = append(edges, edge)
+				foundEdge[edge.String()] = struct{}{}
+			}
+		}
+	}
+
+	// sort edges in ascending order of weight
+	sort.Sort(EdgeSlice(edges))
+
+	// for each edge (u, v) in edges:
+	for _, edge := range edges {
+		// if FindSet(u) ≠ FindSet(v):
+		if FindSet(forests, edge.Source().String()).represent != FindSet(forests, edge.Target().String()).represent {
+
+			// A = A ∪ {(u, v)}
+			A[edge] = struct{}{}
+
+			// Union(u, v)
+			// overwrite v's represent with u's represent
+			Union(forests, FindSet(forests, edge.Source().String()), FindSet(forests, edge.Target().String()))
+		}
+	}
+
+	return A, nil
+}
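+
+// A minimal usage sketch for Kruskal, assuming g is a Graph built as in
+// the NewGraphFromJSON example:
+//
+//	mst, err := Kruskal(g)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for edge := range mst {
+//		fmt.Println(edge.String())
+//	}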
+
+// Prim finds the minimum spanning tree with a min-heap (priority queue).
+// (http://en.wikipedia.org/wiki/Prim%27s_algorithm)
+//
+//	 0. Prim(G, source)
+//	 1.
+//	 2. 	let Q be a priority queue
+//	 3. 	distance[source] = 0
+//	 4.
+//	 5. 	for each vertex v in G:
+//	 6.
+//	 7. 		if v ≠ source:
+//	 8. 			distance[v] = ∞
+//	 9. 			prev[v] = undefined
+//	10.
+//	11. 		Q.add_with_priority(v, distance[v])
+//	12.
+//	13.
+//	14. 	while Q is not empty:
+//	15.
+//	16. 		u = Q.extract_min()
+//	17.
+//	18. 		for each adjacent vertex v of u:
+//	19.
+//	21. 			if v ∈ Q and distance[v] > weight(u, v):
+//	22. 				distance[v] = weight(u, v)
+//	23. 				prev[v] = u
+//	24. 				Q.decrease_priority(v, weight(u, v))
+//	25.
+//	26.
+//	27. 	return tree from prev
+//
+func Prim(g Graph, src ID) (map[Edge]struct{}, error) {
+
+	// let Q be a priority queue
+	minHeap := &nodeDistanceHeap{}
+
+	// distance[source] = 0
+	distance := make(map[ID]float64)
+	distance[src] = 0.0
+
+	// for each vertex v in G:
+	for id := range g.GetNodes() {
+
+		// if v ≠ src:
+		if id != src {
+			// distance[v] = ∞
+			distance[id] = math.MaxFloat64
+
+			// prev[v] = undefined
+			// prev[v] = ""
+		}
+
+		// Q.add_with_priority(v, distance[v])
+		nds := nodeDistance{}
+		nds.id = id
+		nds.distance = distance[id]
+
+		heap.Push(minHeap, nds)
+	}
+
+	heap.Init(minHeap)
+	prev := make(map[ID]ID)
+
+	// while Q is not empty:
+	for minHeap.Len() != 0 {
+
+		// u = Q.extract_min()
+		u := heap.Pop(minHeap).(nodeDistance)
+		uID := u.id
+
+		// for each adjacent vertex v of u:
+		tm, err := g.GetTargets(uID)
+		if err != nil {
+			return nil, err
+		}
+		for vID := range tm {
+
+			isExist := false
+			for _, one := range *minHeap {
+				if vID == one.id {
+					isExist = true
+					break
+				}
+			}
+
+			// weight(u, v)
+			weight, err := g.GetWeight(uID, vID)
+			if err != nil {
+				return nil, err
+			}
+
+			// if v ∈ Q and distance[v] > weight(u, v):
+			if isExist && distance[vID] > weight {
+
+				// distance[v] = weight(u, v)
+				distance[vID] = weight
+
+				// prev[v] = u
+				prev[vID] = uID
+
+				// Q.decrease_priority(v, weight(u, v))
+				minHeap.updateDistance(vID, weight)
+				heap.Init(minHeap)
+			}
+		}
+
+		sm, err := g.GetSources(uID)
+		if err != nil {
+			return nil, err
+		}
+	// for incoming edges, the node just extracted acts as the target v,
+	// and each of its source nodes plays the role of u in weight(u, v)
+	vID := uID
+	for uID := range sm {
+
+			isExist := false
+			for _, one := range *minHeap {
+				if vID == one.id {
+					isExist = true
+					break
+				}
+			}
+
+			// weight(u, v)
+			weight, err := g.GetWeight(uID, vID)
+			if err != nil {
+				return nil, err
+			}
+
+			// if v ∈ Q and distance[v] > weight(u, v):
+			if isExist && distance[vID] > weight {
+
+				// distance[v] = weight(u, v)
+				distance[vID] = weight
+
+				// prev[v] = u
+				prev[vID] = uID
+
+				// Q.decrease_priority(v, weight(u, v))
+				minHeap.updateDistance(vID, weight)
+				heap.Init(minHeap)
+			}
+		}
+	}
+
+	tree := make(map[Edge]struct{})
+	for k, v := range prev {
+		weight, err := g.GetWeight(v, k)
+		if err != nil {
+			return nil, err
+		}
+		tree[NewEdge(g.GetNode(v), g.GetNode(k), weight)] = struct{}{}
+	}
+	return tree, nil
+}
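+
+// A minimal usage sketch for Prim; the source node ID is illustrative,
+// and the total weight is recomputed with GetWeight, assuming Edge's
+// Source and Target accessors return Nodes:
+//
+//	mst, err := Prim(g, StringID("S"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	total := 0.0
+//	for edge := range mst {
+//		w, _ := g.GetWeight(edge.Source().ID(), edge.Target().ID())
+//		total += w
+//	}
+//	fmt.Println(total)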
+
+type nodeDistance struct {
+	id       ID
+	distance float64
+}
+
+// container/heap's Interface embeds sort.Interface and adds Push and Pop.
+
+// nodeDistanceHeap is a min-heap of nodeDistances.
+type nodeDistanceHeap []nodeDistance
+
+func (h nodeDistanceHeap) Len() int           { return len(h) }
+func (h nodeDistanceHeap) Less(i, j int) bool { return h[i].distance < h[j].distance } // Min-Heap
+func (h nodeDistanceHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+func (h *nodeDistanceHeap) Push(x interface{}) {
+	*h = append(*h, x.(nodeDistance))
+}
+
+func (h *nodeDistanceHeap) Pop() interface{} {
+	heapSize := len(*h)
+	lastNode := (*h)[heapSize-1]
+	*h = (*h)[0 : heapSize-1]
+	return lastNode
+}
+
+func (h *nodeDistanceHeap) updateDistance(id ID, val float64) {
+	for i := 0; i < len(*h); i++ {
+		if (*h)[i].id == id {
+			(*h)[i].distance = val
+			break
+		}
+	}
+}
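+
+// A minimal sketch of how container/heap drives this type; the IDs and
+// distances are illustrative only:
+//
+//	h := &nodeDistanceHeap{}
+//	heap.Push(h, nodeDistance{id: StringID("A"), distance: 3.0})
+//	heap.Push(h, nodeDistance{id: StringID("B"), distance: 1.0})
+//	nd := heap.Pop(h).(nodeDistance) // the minimum: StringID("B")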
diff --git a/vendor/github.com/gyuho/goraph/shortest_path.go b/vendor/github.com/gyuho/goraph/shortest_path.go
new file mode 100644
index 0000000..e6f405c
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/shortest_path.go
@@ -0,0 +1,348 @@
+package goraph
+
+import (
+	"container/heap"
+	"fmt"
+	"math"
+)
+
+// Dijkstra returns the shortest path using the Dijkstra
+// algorithm with a min-priority queue. This algorithm
+// does not work with negative-weight edges.
+// (https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm)
+//
+//	 0. Dijkstra(G, source, target)
+//	 1.
+//	 2. 	let Q be a priority queue
+//	 3. 	distance[source] = 0
+//	 4.
+//	 5. 	for each vertex v in G:
+//	 6.
+//	 7. 		if v ≠ source:
+//	 8. 			distance[v] = ∞
+//	 9. 			prev[v] = undefined
+//	10.
+//	11. 		Q.add_with_priority(v, distance[v])
+//	12.
+//	13. 	while Q is not empty:
+//	14.
+//	15. 		u = Q.extract_min()
+//	16. 		if u == target:
+//	17. 			break
+//	18.
+//	19. 		for each child vertex v of u:
+//	20.
+//	21. 			alt = distance[u] + weight(u, v)
+//	22. 			if distance[v] > alt:
+//	23. 				distance[v] = alt
+//	24. 				prev[v] = u
+//	25. 				Q.decrease_priority(v, alt)
+//	26.
+//	27. 		reheapify(Q)
+//	28.
+//	29.
+//	30. 	path = []
+//	31. 	u = target
+//	32. 	while prev[u] is defined:
+//	33. 		path.push_front(u)
+//	34. 		u = prev[u]
+//	35.
+//	36. 	return path, prev
+//
+func Dijkstra(g Graph, source, target ID) ([]ID, map[ID]float64, error) {
+	// let Q be a priority queue
+	minHeap := &nodeDistanceHeap{}
+
+	// distance[source] = 0
+	distance := make(map[ID]float64)
+	distance[source] = 0.0
+
+	// for each vertex v in G:
+	for id := range g.GetNodes() {
+		// if v ≠ source:
+		if id != source {
+			// distance[v] = ∞
+			distance[id] = math.MaxFloat64
+
+			// prev[v] = undefined
+			// prev[v] = ""
+		}
+
+		// Q.add_with_priority(v, distance[v])
+		nds := nodeDistance{}
+		nds.id = id
+		nds.distance = distance[id]
+
+		heap.Push(minHeap, nds)
+	}
+
+	heap.Init(minHeap)
+	prev := make(map[ID]ID)
+
+	// while Q is not empty:
+	for minHeap.Len() != 0 {
+
+		// u = Q.extract_min()
+		u := heap.Pop(minHeap).(nodeDistance)
+
+		// if u == target:
+		if u.id == target {
+			break
+		}
+
+		// for each child vertex v of u:
+		cmap, err := g.GetTargets(u.id)
+		if err != nil {
+			return nil, nil, err
+		}
+		for v := range cmap {
+
+			// alt = distance[u] + weight(u, v)
+			weight, err := g.GetWeight(u.id, v)
+			if err != nil {
+				return nil, nil, err
+			}
+			alt := distance[u.id] + weight
+
+			// if distance[v] > alt:
+			if distance[v] > alt {
+
+				// distance[v] = alt
+				distance[v] = alt
+
+				// prev[v] = u
+				prev[v] = u.id
+
+				// Q.decrease_priority(v, alt)
+				minHeap.updateDistance(v, alt)
+			}
+		}
+		heap.Init(minHeap)
+	}
+
+	// path = []
+	path := []ID{}
+
+	// u = target
+	u := target
+
+	// while prev[u] is defined:
+	for {
+		if _, ok := prev[u]; !ok {
+			break
+		}
+		// path.push_front(u)
+		temp := make([]ID, len(path)+1)
+		temp[0] = u
+		copy(temp[1:], path)
+		path = temp
+
+		// u = prev[u]
+		u = prev[u]
+	}
+
+	// add the source
+	temp := make([]ID, len(path)+1)
+	temp[0] = source
+	copy(temp[1:], path)
+	path = temp
+
+	return path, distance, nil
+}
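+
+// A minimal usage sketch for Dijkstra; the node IDs are illustrative only:
+//
+//	path, dist, err := Dijkstra(g, StringID("S"), StringID("T"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(path)                // node IDs from S to T
+//	fmt.Println(dist[StringID("T")]) // total weight of the shortest path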
+
+// BellmanFord returns the shortest path using the Bellman-Ford algorithm.
+// This algorithm works with negative-weight edges.
+// Time complexity is O(|V||E|).
+// (http://courses.csail.mit.edu/6.006/spring11/lectures/lec15.pdf)
+// It returns an error when there is a negative-weight cycle, since
+// such a cycle lets a path's total weight decrease without bound.
+//
+//	 0. BellmanFord(G, source, target)
+//	 1.
+//	 2. 	distance[source] = 0
+//	 3.
+//	 4. 	for each vertex v in G:
+//	 5.
+//	 6. 		if v ≠ source:
+//	 7. 			distance[v] = ∞
+//	 8. 			prev[v] = undefined
+//	 9.
+//	10.
+//	11. 	for 1 to |V|-1:
+//	12.
+//	13. 		for every edge (u, v):
+//	14.
+//	15. 			alt = distance[u] + weight(u, v)
+//	16. 			if distance[v] > alt:
+//	17. 				distance[v] = alt
+//	18. 				prev[v] = u
+//	19.
+//	20.
+//	21. 	for every edge (u, v):
+//	22.
+//	23. 		alt = distance[u] + weight(u, v)
+//	24. 		if distance[v] > alt:
+//	25. 			there is a negative-weight cycle
+//	26.
+//	27.
+//	28. 	path = []
+//	29. 	u = target
+//	30. 	while prev[u] is defined:
+//	31. 		path.push_front(u)
+//	32. 		u = prev[u]
+//	33.
+//	34. 	return path, prev
+//
+func BellmanFord(g Graph, source, target ID) ([]ID, map[ID]float64, error) {
+	// distance[source] = 0
+	distance := make(map[ID]float64)
+	distance[source] = 0.0
+
+	// for each vertex v in G:
+	for id := range g.GetNodes() {
+
+		// if v ≠ source:
+		if id != source {
+			// distance[v] = ∞
+			distance[id] = math.MaxFloat64
+
+			// prev[v] = undefined
+			// prev[v] = ""
+		}
+	}
+
+	prev := make(map[ID]ID)
+
+	// for 1 to |V|-1:
+	for i := 1; i <= g.GetNodeCount()-1; i++ {
+
+		// for every edge (u, v):
+		for id := range g.GetNodes() {
+
+			cmap, err := g.GetTargets(id)
+			if err != nil {
+				return nil, nil, err
+			}
+			u := id
+			for v := range cmap {
+				// edge (u, v)
+				weight, err := g.GetWeight(u, v)
+				if err != nil {
+					return nil, nil, err
+				}
+
+				// alt = distance[u] + weight(u, v)
+				alt := distance[u] + weight
+
+				// if distance[v] > alt:
+				if distance[v] > alt {
+					// distance[v] = alt
+					distance[v] = alt
+
+					// prev[v] = u
+					prev[v] = u
+				}
+			}
+
+			pmap, err := g.GetSources(id)
+			if err != nil {
+				return nil, nil, err
+			}
+			v := id
+			for u := range pmap {
+				// edge (u, v)
+				weight, err := g.GetWeight(u, v)
+				if err != nil {
+					return nil, nil, err
+				}
+
+				// alt = distance[u] + weight(u, v)
+				alt := distance[u] + weight
+
+				// if distance[v] > alt:
+				if distance[v] > alt {
+					// distance[v] = alt
+					distance[v] = alt
+
+					// prev[v] = u
+					prev[v] = u
+				}
+			}
+		}
+	}
+
+	// for every edge (u, v):
+	for id := range g.GetNodes() {
+
+		cmap, err := g.GetTargets(id)
+		if err != nil {
+			return nil, nil, err
+		}
+		u := id
+		for v := range cmap {
+			// edge (u, v)
+			weight, err := g.GetWeight(u, v)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			// alt = distance[u] + weight(u, v)
+			alt := distance[u] + weight
+
+			// if distance[v] > alt:
+			if distance[v] > alt {
+				return nil, nil, fmt.Errorf("there is a negative-weight cycle: %v", g)
+			}
+		}
+
+		pmap, err := g.GetSources(id)
+		if err != nil {
+			return nil, nil, err
+		}
+		v := id
+		for u := range pmap {
+			// edge (u, v)
+			weight, err := g.GetWeight(u, v)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			// alt = distance[u] + weight(u, v)
+			alt := distance[u] + weight
+
+			// if distance[v] > alt:
+			if distance[v] > alt {
+				return nil, nil, fmt.Errorf("there is a negative-weight cycle: %v", g)
+			}
+		}
+	}
+
+	// path = []
+	path := []ID{}
+
+	// u = target
+	u := target
+
+	// while prev[u] is defined:
+	for {
+		if _, ok := prev[u]; !ok {
+			break
+		}
+		// path.push_front(u)
+		temp := make([]ID, len(path)+1)
+		temp[0] = u
+		copy(temp[1:], path)
+		path = temp
+
+		// u = prev[u]
+		u = prev[u]
+	}
+
+	// add the source
+	temp := make([]ID, len(path)+1)
+	temp[0] = source
+	copy(temp[1:], path)
+	path = temp
+
+	return path, distance, nil
+}
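+
+// BellmanFord is called like Dijkstra, and additionally reports a
+// negative-weight cycle as an error. A minimal sketch (IDs illustrative):
+//
+//	path, dist, err := BellmanFord(g, StringID("S"), StringID("T"))
+//	if err != nil {
+//		log.Fatal(err) // e.g. a negative-weight cycle was found
+//	}
+//	fmt.Println(path, dist[StringID("T")])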
diff --git a/vendor/github.com/gyuho/goraph/strongly_connected_components.go b/vendor/github.com/gyuho/goraph/strongly_connected_components.go
new file mode 100644
index 0000000..b0a11a5
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/strongly_connected_components.go
@@ -0,0 +1,195 @@
+package goraph
+
+import "sync"
+
+// Tarjan finds the strongly connected components.
+// In mathematics, a directed graph is "strongly connected"
+// if every vertex is reachable from every other vertex;
+// that is, there is a path in each direction between
+// every pair of vertices in the graph.
+// The "strongly connected components" of an arbitrary
+// directed graph partition it into sub-graphs that are
+// themselves strongly connected.
+// Formally, a "strongly connected component" of a graph is
+// a maximal set of vertices C in G.V such that for all
+// u, v ∈ C, there is a path both from u to v, and from v to u.
+// (https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm)
+//
+//	 0. Tarjan(G):
+//	 1.
+//	 2. 	globalIndex = 0 // smallest unused index
+//	 3. 	let S be a stack
+//	 4. 	result = [][]
+//	 5.
+//	 6. 	for each vertex v in G:
+//	 7. 		if v.index is undefined:
+//	 8. 			tarjan(G, v, globalIndex, S, result)
+//	 9.
+//	10. 	return result
+//	11.
+//	12.
+//	13. tarjan(G, v, globalIndex, S, result):
+//	14.
+//	15. 	v.index = globalIndex
+//	16. 	v.lowLink = globalIndex
+//	17. 	globalIndex++
+//	18. 	S.push(v)
+//	19.
+//	20. 	for each child vertex w of v:
+//	21.
+//	22. 		if w.index is undefined:
+//	23. 			recursively tarjan(G, w, globalIndex, S, result)
+//	24. 			v.lowLink = min(v.lowLink, w.lowLink)
+//	25.
+//	26. 		else if w is in S:
+//	27. 			v.lowLink = min(v.lowLink, w.index)
+//	28.
+//	29. 	// if v is the root
+//	30. 	if v.lowLink == v.index:
+//	31.
+//	32. 		// start a new strongly connected component
+//	33. 		component = []
+//	34.
+//	35. 		while True:
+//	36.
+//	37. 			u = S.pop()
+//	38. 			component.push(u)
+//	39.
+//	40. 			if u == v:
+//	41. 				result.push(component)
+//	42. 				break
+//
+func Tarjan(g Graph) [][]ID {
+	d := newTarjanData()
+
+	// for each vertex v in G:
+	for v := range g.GetNodes() {
+		// if v.index is undefined:
+		if _, ok := d.index[v]; !ok {
+			// tarjan(G, v, globalIndex, S, result)
+			tarjan(g, v, d)
+		}
+	}
+	return d.result
+}
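+
+// A minimal usage sketch for Tarjan; each element of the result is one
+// strongly connected component:
+//
+//	for _, component := range Tarjan(g) {
+//		fmt.Println(component)
+//	}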
+
+type tarjanData struct {
+	mu sync.Mutex // guards the following
+
+	// globalIndex is the smallest unused index
+	globalIndex int
+
+	// index is an index of a node to record
+	// the order of being discovered.
+	index map[ID]int
+
+	// lowLink is the smallest index of any node
+	// reachable from v, including v itself.
+	lowLink map[ID]int
+
+	// S is the stack.
+	S []ID
+
+	// extra map to check if a vertex is in S.
+	smap map[ID]struct{}
+
+	result [][]ID
+}
+
+func newTarjanData() *tarjanData {
+	return &tarjanData{
+		globalIndex: 0,
+		index:       make(map[ID]int),
+		lowLink:     make(map[ID]int),
+		S:           []ID{},
+		smap:        make(map[ID]struct{}),
+		result:      [][]ID{},
+	}
+}
+
+func tarjan(
+	g Graph,
+	id ID,
+	data *tarjanData,
+) {
+	// This is not an inherently parallelizable problem,
+	// but lock anyway to be safe.
+	data.mu.Lock()
+
+	// v.index = globalIndex
+	data.index[id] = data.globalIndex
+
+	// v.lowLink = globalIndex
+	data.lowLink[id] = data.globalIndex
+
+	// globalIndex++
+	data.globalIndex++
+
+	// S.push(v)
+	data.S = append(data.S, id)
+	data.smap[id] = struct{}{}
+
+	data.mu.Unlock()
+
+	// for each child vertex w of v:
+	cmap, err := g.GetTargets(id)
+	if err != nil {
+		panic(err)
+	}
+	for w := range cmap {
+
+		// if w.index is undefined:
+		if _, ok := data.index[w]; !ok {
+
+			// recursively tarjan(G, w, globalIndex, S, result)
+			tarjan(g, w, data)
+
+			// v.lowLink = min(v.lowLink, w.lowLink)
+			data.lowLink[id] = min(data.lowLink[id], data.lowLink[w])
+
+		} else if _, ok := data.smap[w]; ok {
+			// else if w is in S:
+
+			// v.lowLink = min(v.lowLink, w.index)
+			data.lowLink[id] = min(data.lowLink[id], data.index[w])
+		}
+	}
+
+	data.mu.Lock()
+	defer data.mu.Unlock()
+
+	// if v is the root
+	// if v.lowLink == v.index:
+	if data.lowLink[id] == data.index[id] {
+		// start a new strongly connected component
+		component := []ID{}
+
+		// while True:
+		for {
+
+			// u = S.pop()
+			u := data.S[len(data.S)-1]
+			data.S = data.S[:len(data.S)-1 : len(data.S)-1]
+			delete(data.smap, u)
+
+			// component.push(u)
+			component = append(component, u)
+
+			// if u == v:
+			if u == id {
+				data.result = append(data.result, component)
+				break
+			}
+		}
+	}
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/gyuho/goraph/test b/vendor/github.com/gyuho/goraph/test
new file mode 100755
index 0000000..a7d8a6d
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/test
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+TEST=./...;
+FMT="*.go"
+
+echo "Running tests...";
+go test -v -cover -cpu 1,2,4 $TEST;
+go test -v -cover -cpu 1,2,4 -race $TEST;
+
+echo "Checking gofmt..."
+fmtRes=$(gofmt -l -s $FMT)
+if [ -n "${fmtRes}" ]; then
+	echo -e "gofmt checking failed:\n${fmtRes}"
+	exit 255
+fi
+
+echo "Checking govet..."
+vetRes=$(go vet $TEST)
+if [ -n "${vetRes}" ]; then
+	echo -e "govet checking failed:\n${vetRes}"
+	exit 255
+fi
+
+echo "Success";
diff --git a/vendor/github.com/gyuho/goraph/topological_sort.go b/vendor/github.com/gyuho/goraph/topological_sort.go
new file mode 100644
index 0000000..b63675a
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/topological_sort.go
@@ -0,0 +1,98 @@
+package goraph
+
+// TopologicalSort does a topological sort (ordering) with DFS.
+// It returns true if the graph is a DAG (no cycle, so a topological
+// sort exists), and false if it is not (a cycle, so no topological sort).
+//
+//	 0. TopologicalSort(G)
+//	 1.
+//	 2. 	L = Empty list that will contain the sorted nodes
+//	 3. 	isDAG = true
+//	 4.
+//	 5. 	for each vertex v in G:
+//	 6.
+//	 7. 		if v.color == "white":
+//	 8.
+//	 9. 			topologicalSortVisit(v, L, isDAG)
+//	10.
+//	11.
+//	12.
+//	13.
+//	14. topologicalSortVisit(v, L, isDAG)
+//	15.
+//	16. 	if v.color == "gray":
+//	17. 		isDAG = false
+//	18. 		return
+//	19.
+//	20. 	if v.color == "white":
+//	21.
+//	22. 		v.color = "gray":
+//	23.
+//	24.			for each child vertex w of v:
+//	25. 			topologicalSortVisit(w, L, isDAG)
+//	26.
+//	27. 		v.color = "black"
+//	28.			L.push_front(v)
+//
+func TopologicalSort(g Graph) ([]ID, bool) {
+
+	// L = Empty list that will contain the sorted nodes
+	L := []ID{}
+	isDAG := true
+	color := make(map[ID]string)
+	for v := range g.GetNodes() {
+		color[v] = "white"
+	}
+
+	// for each vertex v in G:
+	for v := range g.GetNodes() {
+		// if v.color == "white":
+		if color[v] == "white" {
+			// topologicalSortVisit(v, L, isDAG)
+			topologicalSortVisit(g, v, &L, &isDAG, &color)
+		}
+	}
+
+	return L, isDAG
+}
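+
+// A minimal usage sketch for TopologicalSort:
+//
+//	order, isDAG := TopologicalSort(g)
+//	if !isDAG {
+//		log.Fatal("graph has a cycle; no topological sort exists")
+//	}
+//	fmt.Println(order)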
+
+func topologicalSortVisit(
+	g Graph,
+	id ID,
+	L *[]ID,
+	isDAG *bool,
+	color *map[ID]string,
+) {
+
+	// if v.color == "gray":
+	if (*color)[id] == "gray" {
+		// isDAG = false
+		*isDAG = false
+		return
+	}
+
+	// if v.color == "white":
+	if (*color)[id] == "white" {
+		// v.color = "gray":
+		(*color)[id] = "gray"
+
+		// for each child vertex w of v:
+		cmap, err := g.GetTargets(id)
+		if err != nil {
+			panic(err)
+		}
+		for w := range cmap {
+			// topologicalSortVisit(w, L, isDAG)
+			topologicalSortVisit(g, w, L, isDAG, color)
+		}
+
+		// v.color = "black"
+		(*color)[id] = "black"
+
+		// L.push_front(v)
+		temp := make([]ID, len(*L)+1)
+		temp[0] = id
+		copy(temp[1:], *L)
+		*L = temp
+	}
+}
diff --git a/vendor/github.com/gyuho/goraph/traversal.go b/vendor/github.com/gyuho/goraph/traversal.go
new file mode 100644
index 0000000..fa45c29
--- /dev/null
+++ b/vendor/github.com/gyuho/goraph/traversal.go
@@ -0,0 +1,184 @@
+package goraph
+
+// BFS does breadth-first search, and returns the list of vertices.
+// (https://en.wikipedia.org/wiki/Breadth-first_search)
+//
+//	 0. BFS(G, v):
+//	 1.
+//	 2. 	let Q be a queue
+//	 3. 	Q.push(v)
+//	 4. 	label v as visited
+//	 5.
+//	 6. 	while Q is not empty:
+//	 7.
+//	 8. 		u = Q.dequeue()
+//	 9.
+//	10. 		for each vertex w adjacent to u:
+//	11.
+//	12. 			if w is not visited yet:
+//	13. 				Q.push(w)
+//	14. 				label w as visited
+//
+func BFS(g Graph, id ID) []ID {
+	if g.GetNode(id) == nil {
+		return nil
+	}
+
+	q := []ID{id}
+	visited := make(map[ID]bool)
+	visited[id] = true
+	rs := []ID{id}
+
+	// while Q is not empty:
+	for len(q) != 0 {
+
+		u := q[0]
+		q = q[1:len(q):len(q)]
+
+		// for each vertex w adjacent to u:
+		cmap, _ := g.GetTargets(u)
+		for _, w := range cmap {
+			// if w is not visited yet:
+			if _, ok := visited[w.ID()]; !ok {
+				q = append(q, w.ID())  // Q.push(w)
+				visited[w.ID()] = true // label w as visited
+
+				rs = append(rs, w.ID())
+			}
+		}
+		pmap, _ := g.GetSources(u)
+		for _, w := range pmap {
+			// if w is not visited yet:
+			if _, ok := visited[w.ID()]; !ok {
+				q = append(q, w.ID())  // Q.push(w)
+				visited[w.ID()] = true // label w as visited
+
+				rs = append(rs, w.ID())
+			}
+		}
+	}
+
+	return rs
+}
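+
+// A minimal usage sketch for BFS; the start node ID is illustrative only.
+// DFS and DFSRecursion below are called the same way:
+//
+//	for _, id := range BFS(g, StringID("S")) {
+//		fmt.Println(id)
+//	}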
+
+// DFS does depth-first search, and returns the list of vertices.
+// (https://en.wikipedia.org/wiki/Depth-first_search)
+//
+//	 0. DFS(G, v):
+//	 1.
+//	 2. 	let S be a stack
+//	 3. 	S.push(v)
+//	 4.
+//	 5. 	while S is not empty:
+//	 6.
+//	 7. 		u = S.pop()
+//	 8.
+//	 9. 		if u is not visited yet:
+//	10.
+//	11. 			label u as visited
+//	12.
+//	13. 			for each vertex w adjacent to u:
+//	14.
+//	15. 				if w is not visited yet:
+//	16. 					S.push(w)
+//
+func DFS(g Graph, id ID) []ID {
+	if g.GetNode(id) == nil {
+		return nil
+	}
+
+	s := []ID{id}
+	visited := make(map[ID]bool)
+	rs := []ID{}
+
+	// while S is not empty:
+	for len(s) != 0 {
+
+		u := s[len(s)-1]
+		s = s[:len(s)-1 : len(s)-1]
+
+		// if u is not visited yet:
+		if _, ok := visited[u]; !ok {
+			// label u as visited
+			visited[u] = true
+
+			rs = append(rs, u)
+
+			// for each vertex w adjacent to u:
+			cmap, _ := g.GetTargets(u)
+			for _, w := range cmap {
+				// if w is not visited yet:
+				if _, ok := visited[w.ID()]; !ok {
+					s = append(s, w.ID()) // S.push(w)
+				}
+			}
+			pmap, _ := g.GetSources(u)
+			for _, w := range pmap {
+				// if w is not visited yet:
+				if _, ok := visited[w.ID()]; !ok {
+					s = append(s, w.ID()) // S.push(w)
+				}
+			}
+		}
+	}
+
+	return rs
+}
+
+// DFSRecursion does depth-first search recursively.
+//
+//	 0. DFS(G, v):
+//	 1.
+//	 2. 	if v is visited:
+//	 3. 		return
+//	 4.
+//	 5. 	label v as visited
+//	 6.
+//	 7. 	for each vertex u adjacent to v:
+//	 8.
+//	 9. 		if u is not visited yet:
+//	10. 			recursive DFS(G, u)
+//
+func DFSRecursion(g Graph, id ID) []ID {
+	if g.GetNode(id) == nil {
+		return nil
+	}
+
+	visited := make(map[ID]bool)
+	rs := []ID{}
+
+	dfsRecursion(g, id, visited, &rs)
+
+	return rs
+}
+
+func dfsRecursion(g Graph, id ID, visited map[ID]bool, rs *[]ID) {
+	// base case of recursion
+	//
+	// if v is visited:
+	if _, ok := visited[id]; ok {
+		return
+	}
+
+	// label v as visited
+	visited[id] = true
+	*rs = append(*rs, id)
+
+	// for each vertex u adjacent to v:
+	cmap, _ := g.GetTargets(id)
+	for _, u := range cmap {
+		// if u is not visited yet:
+		if _, ok := visited[u.ID()]; !ok {
+			// recursive DFS(G, u)
+			dfsRecursion(g, u.ID(), visited, rs)
+		}
+	}
+	pmap, _ := g.GetSources(id)
+	for _, u := range pmap {
+		// if u is not visited yet:
+		if _, ok := visited[u.ID()]; !ok {
+			// recursive DFS(G, u)
+			dfsRecursion(g, u.ID(), visited, rs)
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/consul/NOTICE.md b/vendor/github.com/hashicorp/consul/NOTICE.md
new file mode 100644
index 0000000..fe34b5e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/NOTICE.md
@@ -0,0 +1,3 @@
+Copyright © 2014-2018 HashiCorp, Inc.
+
+This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this project, you can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 0000000..3255cbb
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,67 @@
+Consul API client
+=================
+
+This `api` package attempts to provide programmatic access
+to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.6.0 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+package main
+
+import "github.com/hashicorp/consul/api"
+import "fmt"
+
+func main() {
+	// Get a new client
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		panic(err)
+	}
+
+	// Get a handle to the KV API
+	kv := client.KV()
+
+	// PUT a new KV pair
+	p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
+	_, err = kv.Put(p, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Lookup the pair
+	pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
+}
+```
+
+To run this example, start a Consul server:
+
+```bash
+consul agent -dev
+```
+
+Copy the code above into a file such as `main.go`.
+
+Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
+
+```bash
+$ go get
+$ go run main.go
+KV: REDIS_MAXCLIENTS 1000
+```
+
+After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 0000000..53a0523
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,588 @@
+package api
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"time"
+)
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+type ACLTokenPolicyLink struct {
+	ID   string
+	Name string
+}
+
+// ACLToken represents an ACL Token
+type ACLToken struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	AccessorID  string
+	SecretID    string
+	Description string
+	Policies    []*ACLTokenPolicyLink
+	Local       bool
+	CreateTime  time.Time `json:",omitempty"`
+	Hash        []byte    `json:",omitempty"`
+
+	// DEPRECATED (ACL-Legacy-Compat)
+	// Rules will only be present for legacy tokens returned via the new APIs
+	Rules string `json:",omitempty"`
+}
+
+type ACLTokenListEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	AccessorID  string
+	Description string
+	Policies    []*ACLTokenPolicyLink
+	Local       bool
+	CreateTime  time.Time
+	Hash        []byte
+	Legacy      bool
+}
+
+// ACLEntry is used to represent a legacy ACL token
+// The legacy tokens are deprecated.
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACLReplicationStatus is used to represent the status of ACL replication.
+type ACLReplicationStatus struct {
+	Enabled              bool
+	Running              bool
+	SourceDatacenter     string
+	ReplicationType      string
+	ReplicatedIndex      uint64
+	ReplicatedTokenIndex uint64
+	LastSuccess          time.Time
+	LastError            time.Time
+}
+
+// ACLPolicy represents an ACL Policy.
+type ACLPolicy struct {
+	ID          string
+	Name        string
+	Description string
+	Rules       string
+	Datacenters []string
+	Hash        []byte
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+type ACLPolicyListEntry struct {
+	ID          string
+	Name        string
+	Description string
+	Datacenters []string
+	Hash        []byte
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster
+// to get the first management token.
+func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/bootstrap")
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, wm, nil
+}
+
+// Create is used to generate a new token with the given parameters
+//
+// Deprecated: Use TokenCreate instead.
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+//
+// Deprecated: Use TokenUpdate instead.
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/update")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+//
+// Deprecated: Use TokenDelete instead.
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+//
+// Deprecated: Use TokenClone instead.
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+//
+// Deprecated: Use TokenRead instead.
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+//
+// Deprecated: Use TokenList instead.
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/list")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Replication returns the status of the ACL replication process in the datacenter
+func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/replication")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries *ACLReplicationStatus
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// TokenCreate creates a new ACL token. It requires the AccessorID and SecretID fields
+// of the ACLToken structure to be empty, as these will be filled in by Consul.
+func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+	if token.AccessorID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an AccessorID in Token Creation")
+	}
+
+	if token.SecretID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify a SecretID in Token Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/token")
+	r.setWriteOptions(q)
+	r.obj = token
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
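+
+// A minimal usage sketch for TokenCreate from outside this package; the
+// description and policy name are illustrative, and the named policy is
+// assumed to already exist:
+//
+//	client, err := api.NewClient(api.DefaultConfig())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	token, _, err := client.ACL().TokenCreate(&api.ACLToken{
+//		Description: "example token",
+//		Policies:    []*api.ACLTokenPolicyLink{{Name: "example-policy"}},
+//	}, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(token.AccessorID)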
+
+// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid
+// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may
+// be omitted and will be filled in by Consul with its existing value.
+func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+	if token.AccessorID == "" {
+		return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating")
+	}
+	r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID)
+	r.setWriteOptions(q)
+	r.obj = token
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// TokenClone will create a new token with the same policies and locality as the original
+// token, but with its own auto-generated AccessorID and SecretID, as well as the
+// description passed to this function. The tokenID parameter must be a valid Accessor ID
+// of an existing token.
+func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+	if tokenID == "" {
+		return nil, nil, fmt.Errorf("Must specify a tokenID for Token Cloning")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/token/"+tokenID+"/clone")
+	r.setWriteOptions(q)
+	r.obj = struct{ Description string }{description}
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// TokenDelete removes a single ACL token. The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/token/"+tokenID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// TokenRead retrieves the full token details. The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// TokenReadSelf retrieves the full token details of the token currently
+// assigned to the API Client. In this manner it is possible to read a token
+// by its Secret ID.
+func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/token/self")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// TokenList lists all tokens. The listing does not contain any SecretIDs as those
+// may only be retrieved by a call to TokenRead.
+func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/tokens")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLTokenListEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
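+
+// An illustrative sketch: because the listing omits SecretIDs, pairing
+// TokenList with TokenRead recovers the full details for each entry,
+// assuming a client variable as in the earlier sketch.
+//
+//	entries, _, err := client.ACL().TokenList(nil)
+//	if err != nil {
+//		return err
+//	}
+//	for _, entry := range entries {
+//		full, _, err := client.ACL().TokenRead(entry.AccessorID, nil)
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(full.AccessorID, full.SecretID)
+//	}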
+
+// PolicyCreate will create a new policy. The ID field of the policy parameter
+// must not be set, as it will be generated by Consul while processing the request.
+func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
+	if policy.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/policy")
+	r.setWriteOptions(q)
+	r.obj = policy
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLPolicy
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// PolicyUpdate updates a policy. The ID field of the policy parameter must be
+// set to an existing policy ID.
+func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
+	if policy.ID == "" {
+		return nil, nil, fmt.Errorf("Must specify an ID in Policy Update")
+	}
+
+	r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID)
+	r.setWriteOptions(q)
+	r.obj = policy
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLPolicy
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// PolicyDelete deletes a policy given its ID.
+func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// PolicyRead retrieves the policy details including the rule set.
+func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLPolicy
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// PolicyList retrieves a listing of all policies. The listing does not include the
+// rules for any policy as those should be retrieved by subsequent calls to PolicyRead.
+func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/policies")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLPolicyListEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// RulesTranslate translates the legacy rule syntax into the current syntax.
+//
+// Deprecated: Support for the legacy syntax translation will be removed
+// when legacy ACL support is removed.
+func (a *ACL) RulesTranslate(rules io.Reader) (string, error) {
+	r := a.c.newRequest("POST", "/v1/acl/rules/translate")
+	r.body = rules
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	ruleBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("Failed to read translated rule body: %v", err)
+	}
+
+	return string(ruleBytes), nil
+}
+
+// RulesTranslateToken translates the rules associated with the legacy syntax
+// into the current syntax and returns the results.
+//
+// Deprecated: Support for the legacy syntax translation will be removed
+// when legacy ACL support is removed.
+func (a *ACL) RulesTranslateToken(tokenID string) (string, error) {
+	r := a.c.newRequest("GET", "/v1/acl/rules/translate/"+tokenID)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	ruleBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("Failed to read translated rule body: %v", err)
+	}
+
+	return string(ruleBytes), nil
+}
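+
+// An illustrative sketch: translating a legacy rule document held in memory
+// via RulesTranslate above. The rule text is a hypothetical legacy-syntax
+// example.
+//
+//	translated, err := client.ACL().RulesTranslate(strings.NewReader(
+//		`key "" { policy = "read" }`,
+//	))
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(translated)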
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
new file mode 100644
index 0000000..8e5ffde
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -0,0 +1,890 @@
+package api
+
+import (
+	"bufio"
+	"fmt"
+)
+
+// ServiceKind is the kind of service being registered.
+type ServiceKind string
+
+const (
+	// ServiceKindTypical is a typical, classic Consul service. This is
+	// represented by the absence of a value. This was chosen for ease of
+	// backwards compatibility: existing services in the catalog would
+	// default to the typical service.
+	ServiceKindTypical ServiceKind = ""
+
+	// ServiceKindConnectProxy is a proxy for the Connect feature. This
+	// service proxies another service within Consul and speaks the connect
+	// protocol.
+	ServiceKindConnectProxy ServiceKind = "connect-proxy"
+)
+
+// ProxyExecMode is the execution mode for a managed Connect proxy.
+type ProxyExecMode string
+
+const (
+	// ProxyExecModeDaemon indicates that the proxy command should be long-running
+	// and should be started and supervised by the agent until its target service
+	// is deregistered.
+	ProxyExecModeDaemon ProxyExecMode = "daemon"
+
+	// ProxyExecModeScript indicates that the proxy command should be invoked to
+	// completion on each change to the configuration or lifecycle event. The
+	// script typically fetches the config and certificates from the agent API and
+	// then configures an externally managed daemon, perhaps starting and stopping
+	// it if necessary.
+	ProxyExecModeScript ProxyExecMode = "script"
+)
+
+// UpstreamDestType is the type of upstream discovery mechanism.
+type UpstreamDestType string
+
+const (
+	// UpstreamDestTypeService discovers instances via healthy service lookup.
+	UpstreamDestTypeService UpstreamDestType = "service"
+
+	// UpstreamDestTypePreparedQuery discovers instances via prepared query
+	// execution.
+	UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+	Definition  HealthCheckDefinition
+}
+
+// AgentWeights represents optional weights for a service
+type AgentWeights struct {
+	Passing int
+	Warning int
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	Kind              ServiceKind `json:",omitempty"`
+	ID                string
+	Service           string
+	Tags              []string
+	Meta              map[string]string
+	Port              int
+	Address           string
+	Weights           AgentWeights
+	EnableTagOverride bool
+	CreateIndex       uint64 `json:",omitempty"`
+	ModifyIndex       uint64 `json:",omitempty"`
+	ContentHash       string `json:",omitempty"`
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
+}
+
+// AgentServiceConnect represents the Connect configuration of a service.
+type AgentServiceConnect struct {
+	Native         bool                      `json:",omitempty"`
+	Proxy          *AgentServiceConnectProxy `json:",omitempty"`
+	SidecarService *AgentServiceRegistration `json:",omitempty"`
+}
+
+// AgentServiceConnectProxy represents the Connect Proxy configuration of a
+// service.
+type AgentServiceConnectProxy struct {
+	ExecMode  ProxyExecMode          `json:",omitempty"`
+	Command   []string               `json:",omitempty"`
+	Config    map[string]interface{} `json:",omitempty"`
+	Upstreams []Upstream             `json:",omitempty"`
+}
+
+// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
+// ServiceDefinition or response.
+type AgentServiceConnectProxyConfig struct {
+	DestinationServiceName string
+	DestinationServiceID   string                 `json:",omitempty"`
+	LocalServiceAddress    string                 `json:",omitempty"`
+	LocalServicePort       int                    `json:",omitempty"`
+	Config                 map[string]interface{} `json:",omitempty"`
+	Upstreams              []Upstream
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AllSegments is used to select for all segments in MembersOpts.
+const AllSegments = "_all"
+
+// MembersOpts is used for querying member information.
+type MembersOpts struct {
+	// WAN is whether to show members from the WAN.
+	WAN bool
+
+	// Segment is the LAN segment to show members for. Setting this to the
+	// AllSegments value above will show members in all segments.
+	Segment string
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+	Kind              ServiceKind       `json:",omitempty"`
+	ID                string            `json:",omitempty"`
+	Name              string            `json:",omitempty"`
+	Tags              []string          `json:",omitempty"`
+	Port              int               `json:",omitempty"`
+	Address           string            `json:",omitempty"`
+	EnableTagOverride bool              `json:",omitempty"`
+	Meta              map[string]string `json:",omitempty"`
+	Weights           *AgentWeights     `json:",omitempty"`
+	Check             *AgentServiceCheck
+	Checks            AgentServiceChecks
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+	ID        string `json:",omitempty"`
+	Name      string `json:",omitempty"`
+	Notes     string `json:",omitempty"`
+	ServiceID string `json:",omitempty"`
+	AgentServiceCheck
+}
+
+// AgentServiceCheck is used to define a node or service level check
+type AgentServiceCheck struct {
+	CheckID           string              `json:",omitempty"`
+	Name              string              `json:",omitempty"`
+	Args              []string            `json:"ScriptArgs,omitempty"`
+	DockerContainerID string              `json:",omitempty"`
+	Shell             string              `json:",omitempty"` // Only supported for Docker.
+	Interval          string              `json:",omitempty"`
+	Timeout           string              `json:",omitempty"`
+	TTL               string              `json:",omitempty"`
+	HTTP              string              `json:",omitempty"`
+	Header            map[string][]string `json:",omitempty"`
+	Method            string              `json:",omitempty"`
+	TCP               string              `json:",omitempty"`
+	Status            string              `json:",omitempty"`
+	Notes             string              `json:",omitempty"`
+	TLSSkipVerify     bool                `json:",omitempty"`
+	GRPC              string              `json:",omitempty"`
+	GRPCUseTLS        bool                `json:",omitempty"`
+	AliasNode         string              `json:",omitempty"`
+	AliasService      string              `json:",omitempty"`
+
+	// In Consul 0.7 and later, checks that are associated with a service
+	// may also contain this optional DeregisterCriticalServiceAfter field,
+	// which is a timeout in the same Go time format as Interval and TTL. If
+	// a check is in the critical state for more than this configured value,
+	// then its associated service (and all of its associated checks) will
+	// automatically be deregistered.
+	DeregisterCriticalServiceAfter string `json:",omitempty"`
+}
+type AgentServiceChecks []*AgentServiceCheck
+
+// AgentToken is used when updating ACL tokens for an agent.
+type AgentToken struct {
+	Token string
+}
+
+// MetricsInfo is used to store different types of metric values from the agent.
+type MetricsInfo struct {
+	Timestamp string
+	Gauges    []GaugeValue
+	Points    []PointValue
+	Counters  []SampledValue
+	Samples   []SampledValue
+}
+
+// GaugeValue stores one value that is updated as time goes on, such as
+// the amount of memory allocated.
+type GaugeValue struct {
+	Name   string
+	Value  float32
+	Labels map[string]string
+}
+
+// PointValue holds a series of points for a metric.
+type PointValue struct {
+	Name   string
+	Points []float32
+}
+
+// SampledValue stores info about a metric that is incremented over time,
+// such as the number of requests to an HTTP endpoint.
+type SampledValue struct {
+	Name   string
+	Count  int
+	Sum    float64
+	Min    float64
+	Max    float64
+	Mean   float64
+	Stddev float64
+	Labels map[string]string
+}
+
+// AgentAuthorizeParams are the request parameters for authorizing a request.
+type AgentAuthorizeParams struct {
+	Target           string
+	ClientCertURI    string
+	ClientCertSerial string
+}
+
+// AgentAuthorize is the response structure for Connect authorization.
+type AgentAuthorize struct {
+	Authorized bool
+	Reason     string
+}
+
+// ConnectProxyConfig is the response structure for agent-local proxy
+// configuration.
+type ConnectProxyConfig struct {
+	ProxyServiceID    string
+	TargetServiceID   string
+	TargetServiceName string
+	ContentHash       string
+	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
+	// but they don't need ExecMode or Command
+	ExecMode  ProxyExecMode `json:",omitempty"`
+	Command   []string      `json:",omitempty"`
+	Config    map[string]interface{}
+	Upstreams []Upstream
+}
+
+// Upstream is the response structure for a proxy upstream configuration.
+type Upstream struct {
+	DestinationType      UpstreamDestType `json:",omitempty"`
+	DestinationNamespace string           `json:",omitempty"`
+	DestinationName      string
+	Datacenter           string                 `json:",omitempty"`
+	LocalBindAddress     string                 `json:",omitempty"`
+	LocalBindPort        int                    `json:",omitempty"`
+	Config               map[string]interface{} `json:",omitempty"`
+}
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Host is used to retrieve information about the host the
+// agent is running on, such as CPU, memory, and disk. Requires
+// an operator:read ACL token.
+func (a *Agent) Host() (map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/host")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Metrics is used to query the agent we are speaking to for
+// its current internal metric data
+func (a *Agent) Metrics() (*MetricsInfo, error) {
+	r := a.c.newRequest("GET", "/v1/agent/metrics")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out *MetricsInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Reload triggers a configuration reload for the agent we are connected to.
+func (a *Agent) Reload() error {
+	r := a.c.newRequest("PUT", "/v1/agent/reload")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+	if a.nodeName != "" {
+		return a.nodeName, nil
+	}
+	info, err := a.Self()
+	if err != nil {
+		return "", err
+	}
+	name, ok := info["Config"]["NodeName"].(string)
+	if !ok {
+		return "", fmt.Errorf("Failed to read NodeName from agent self endpoint")
+	}
+	a.nodeName = name
+	return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+	r := a.c.newRequest("GET", "/v1/agent/checks")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+	r := a.c.newRequest("GET", "/v1/agent/services")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// Service returns a locally registered service instance and allows for
+// hash-based blocking.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return out, qm, nil
+}
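+
+// An illustrative sketch of the hash-based blocking described above: feed
+// the previous response's LastContentHash back in as WaitHash, and the call
+// blocks until the agent-local service definition changes. The service ID
+// "web" is hypothetical.
+//
+//	svc, qm, err := client.Agent().Service("web", nil)
+//	if err != nil {
+//		return err
+//	}
+//	svc, qm, err = client.Agent().Service("web", &QueryOptions{
+//		WaitHash: qm.LastContentHash,
+//		WaitTime: time.Minute,
+//	})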
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MembersOpts returns the known gossip members and can be passed
+// additional options for WAN/segment filtering.
+func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	r.params.Set("segment", opts.Segment)
+	if opts.WAN {
+		r.params.Set("wan", "1")
+	}
+
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/register")
+	r.obj = service
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
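+
+// An illustrative sketch: registering a service together with an HTTP
+// health check, using only types defined in this file. The name, port,
+// and check URL are hypothetical.
+//
+//	err := client.Agent().ServiceRegister(&AgentServiceRegistration{
+//		Name: "web",
+//		Port: 8080,
+//		Check: &AgentServiceCheck{
+//			HTTP:     "http://127.0.0.1:8080/health",
+//			Interval: "10s",
+//			Timeout:  "1s",
+//		},
+//	})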
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) PassTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) WarnTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) FailTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "fail")
+}
+
+// updateTTL is used to update the TTL of a check. This is the internal
+// method that uses the old API that's present in Consul versions prior to
+// 0.6.4. Since Consul didn't have an analogous "update" API before, it seemed
+// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
+// but keep the old Pass/Warn/Fail methods using the old API under the hood.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 and the server endpoints will
+// be removed in 0.9.
+func (a *Agent) updateTTL(checkID, note, status string) error {
+	switch status {
+	case "pass":
+	case "warn":
+	case "fail":
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+	endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.params.Set("note", note)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// checkUpdate is the payload for a PUT for a check update.
+type checkUpdate struct {
+	// Status is one of the api.Health* states: HealthPassing
+	// ("passing"), HealthWarning ("warning"), or HealthCritical
+	// ("critical").
+	Status string
+
+	// Output is the information to post to the UI for operators as the
+	// output of the process that decided to hit the TTL check. This is
+	// different from the note field that's associated with the check
+	// itself.
+	Output string
+}
+
+// UpdateTTL is used to update the TTL of a check. This uses the newer API
+// introduced in Consul 0.6.4. We translate the old status strings for
+// compatibility (though Consul 0.6.4 or later is still required to use
+// this API).
+func (a *Agent) UpdateTTL(checkID, output, status string) error {
+	switch status {
+	case "pass", HealthPassing:
+		status = HealthPassing
+	case "warn", HealthWarning:
+		status = HealthWarning
+	case "fail", HealthCritical:
+		status = HealthCritical
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+
+	endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.obj = &checkUpdate{
+		Status: status,
+		Output: output,
+	}
+
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
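+
+// An illustrative sketch of the TTL pattern: register a TTL check (see
+// CheckRegister below), then keep it passing by calling UpdateTTL more
+// often than the TTL expires. The check ID is hypothetical.
+//
+//	err := client.Agent().CheckRegister(&AgentCheckRegistration{
+//		ID:                "heartbeat",
+//		Name:              "heartbeat",
+//		AgentServiceCheck: AgentServiceCheck{TTL: "15s"},
+//	})
+//	// ...then, e.g. every 10 seconds from the application's main loop:
+//	err = client.Agent().UpdateTTL("heartbeat", "alive", HealthPassing)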
+
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/register")
+	r.obj = check
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+	r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Leave is used to have the agent gracefully leave the cluster and shutdown
+func (a *Agent) Leave() error {
+	r := a.c.newRequest("PUT", "/v1/agent/leave")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ConnectAuthorize is used to authorize an incoming connection
+// to a natively integrated Connect service.
+func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) {
+	r := a.c.newRequest("POST", "/v1/agent/connect/authorize")
+	r.obj = auth
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out AgentAuthorize
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// ConnectCARoots returns the list of roots.
+func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out CARootList
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+// ConnectCALeaf gets the leaf certificate for the given service ID.
+func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out LeafCert
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+// ConnectProxyConfig gets the configuration for a local managed proxy instance.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ConnectProxyConfig
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableServiceMaintenance toggles service maintenance mode off
+// for the given service ID.
+func (a *Agent) DisableServiceMaintenance(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableNodeMaintenance toggles node maintenance mode on for the
+// agent we are connected to.
+func (a *Agent) EnableNodeMaintenance(reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableNodeMaintenance toggles node maintenance mode off for the
+// agent we are connected to.
+func (a *Agent) DisableNodeMaintenance() error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Monitor returns a channel which will receive streaming logs from the agent.
+// Providing a non-nil stopCh can be used to close the connection and stop the
+// log stream. An empty string will be sent down the given channel when there's
+// nothing left to stream, after which the caller should close the stopCh.
+func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+	r := a.c.newRequest("GET", "/v1/agent/monitor")
+	r.setQueryOptions(q)
+	if loglevel != "" {
+		r.params.Add("loglevel", loglevel)
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+
+	logCh := make(chan string, 64)
+	go func() {
+		defer resp.Body.Close()
+
+		scanner := bufio.NewScanner(resp.Body)
+		for {
+			select {
+			case <-stopCh:
+				close(logCh)
+				return
+			default:
+			}
+			if scanner.Scan() {
+				// An empty string signals to the caller that
+				// the scan is done, so make sure we only emit
+				// that when the scanner says it's done, not if
+				// we happen to ingest an empty line.
+				if text := scanner.Text(); text != "" {
+					logCh <- text
+				} else {
+					logCh <- " "
+				}
+			} else {
+				logCh <- ""
+			}
+		}
+	}()
+
+	return logCh, nil
+}
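+
+// An illustrative sketch: consuming the log stream until done, honoring
+// the empty-string sentinel documented above.
+//
+//	stopCh := make(chan struct{})
+//	logCh, err := client.Agent().Monitor("info", stopCh, nil)
+//	if err != nil {
+//		return err
+//	}
+//	for line := range logCh {
+//		if line == "" {
+//			break // nothing left to stream
+//		}
+//		fmt.Println(line)
+//	}
+//	close(stopCh)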
+
+// UpdateACLToken updates the agent's "acl_token". See updateToken for more
+// details.
+func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateToken("acl_token", token, q)
+}
+
+// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken
+// for more details.
+func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateToken("acl_agent_token", token, q)
+}
+
+// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See
+// updateToken for more details.
+func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateToken("acl_agent_master_token", token, q)
+}
+
+// UpdateACLReplicationToken updates the agent's "acl_replication_token". See
+// updateToken for more details.
+func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) {
+	return a.updateToken("acl_replication_token", token, q)
+}
+
+// updateToken can be used to update an agent's ACL token after the agent has
+// started. The tokens are not persisted, so will need to be updated again if
+// the agent is restarted.
+func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
+	r.setWriteOptions(q)
+	r.obj = &AgentToken{Token: token}
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
new file mode 100644
index 0000000..cf8bad2
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -0,0 +1,899 @@
+package api
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-rootcerts"
+)
+
+const (
+	// HTTPAddrEnvName defines an environment variable name which sets
+	// the HTTP address if there is no -http-addr specified.
+	HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
+
+	// HTTPTokenEnvName defines an environment variable name which sets
+	// the HTTP token.
+	HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
+
+	// HTTPAuthEnvName defines an environment variable name which sets
+	// the HTTP authentication header.
+	HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
+
+	// HTTPSSLEnvName defines an environment variable name which sets
+	// whether or not to use HTTPS.
+	HTTPSSLEnvName = "CONSUL_HTTP_SSL"
+
+	// HTTPCAFile defines an environment variable name which sets the
+	// CA file to use for talking to Consul over TLS.
+	HTTPCAFile = "CONSUL_CACERT"
+
+	// HTTPCAPath defines an environment variable name which sets the
+	// path to a directory of CA certs to use for talking to Consul over TLS.
+	HTTPCAPath = "CONSUL_CAPATH"
+
+	// HTTPClientCert defines an environment variable name which sets the
+	// client cert file to use for talking to Consul over TLS.
+	HTTPClientCert = "CONSUL_CLIENT_CERT"
+
+	// HTTPClientKey defines an environment variable name which sets the
+	// client key file to use for talking to Consul over TLS.
+	HTTPClientKey = "CONSUL_CLIENT_KEY"
+
+	// HTTPTLSServerName defines an environment variable name which sets the
+	// server name to use as the SNI host when connecting via TLS
+	HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME"
+
+	// HTTPSSLVerifyEnvName defines an environment variable name which sets
+	// whether or not to disable certificate checking.
+	HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
+
+	// GRPCAddrEnvName defines an environment variable name which sets the gRPC
+	// address for consul connect envoy. Note this isn't actually used by the api
+	// client in this package but is defined here for consistency with all the
+	// other ENV names we use.
+	GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+	// Providing a datacenter overrides the DC provided
+	// by the Config.
+	Datacenter string
+
+	// AllowStale allows any Consul server (non-leader) to service
+	// a read. This allows for lower latency and higher throughput
+	AllowStale bool
+
+	// RequireConsistent forces the read to be fully consistent.
+	// This is more expensive but prevents ever performing a stale
+	// read.
+	RequireConsistent bool
+
+	// UseCache requests that the agent cache results locally. See
+	// https://www.consul.io/api/index.html#agent-caching for more details on the
+	// semantics.
+	UseCache bool
+
+	// MaxAge limits how old a cached value will be returned if UseCache is true.
+	// If there is a cached response that is older than the MaxAge, it is treated
+	// as a cache miss and a new fetch invoked. If the fetch fails, the error is
+	// returned. Clients that wish to allow for stale results on error can set
+	// StaleIfError to a longer duration to change this behaviour. It is ignored
+	// if the endpoint supports background refresh caching. See
+	// https://www.consul.io/api/index.html#agent-caching for more details.
+	MaxAge time.Duration
+
+	// StaleIfError specifies how stale the client will accept a cached response
+	// if the servers are unavailable to fetch a fresh one. Only makes sense when
+	// UseCache is true and MaxAge is set to a lower, non-zero value. It is
+	// ignored if the endpoint supports background refresh caching. See
+	// https://www.consul.io/api/index.html#agent-caching for more details.
+	StaleIfError time.Duration
+
+	// WaitIndex is used to enable a blocking query. Waits
+	// until the timeout or the next index is reached
+	WaitIndex uint64
+
+	// WaitHash is used by some endpoints instead of WaitIndex to perform blocking
+	// on state based on a hash of the response rather than a monotonic index.
+	// This is required when the state being blocked on is not stored in Raft, for
+	// example agent-local proxy configuration.
+	WaitHash string
+
+	// WaitTime is used to bound the duration of a wait.
+	// Defaults to that of the Config, but can be overridden.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	// Near is used to provide a node name that will sort the results
+	// in ascending order based on the estimated round trip time from
+	// that node. Setting this to "_agent" will use the agent's node
+	// for the sort.
+	Near string
+
+	// NodeMeta is used to filter results by nodes with the given
+	// metadata key/value pairs. Currently, only one key/value pair can
+	// be provided for filtering.
+	NodeMeta map[string]string
+
+	// RelayFactor is used in keyring operations to cause responses to be
+	// relayed back to the sender through N other random nodes. Must be
+	// a value from 0 to 5 (inclusive).
+	RelayFactor uint8
+
+	// Connect filters prepared query execution to only include Connect-capable
+	// services. This currently affects prepared query execution.
+	Connect bool
+
+	// ctx is an optional context passed through to the underlying HTTP
+	// request layer. Use Context() and WithContext() to manage this.
+	ctx context.Context
+}
+
+func (o *QueryOptions) Context() context.Context {
+	if o != nil && o.ctx != nil {
+		return o.ctx
+	}
+	return context.Background()
+}
+
+func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
+	o2 := new(QueryOptions)
+	if o != nil {
+		*o2 = *o
+	}
+	o2.ctx = ctx
+	return o2
+}
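+
+// An illustrative sketch: bounding a blocking query with a context so it
+// can be cancelled independently of WaitTime. lastIndex stands in for a
+// previously observed X-Consul-Index (hypothetical variable).
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	q := (&QueryOptions{WaitIndex: lastIndex}).WithContext(ctx)
+//	// q can now be passed to any query method in this package.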
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+	// Providing a datacenter overrides the DC provided
+	// by the Config.
+	Datacenter string
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	// RelayFactor is used in keyring operations to cause responses to be
+	// relayed back to the sender through N other random nodes. Must be
+	// a value from 0 to 5 (inclusive).
+	RelayFactor uint8
+
+	// ctx is an optional context passed through to the underlying HTTP
+	// request layer. Use Context() and WithContext() to manage this.
+	ctx context.Context
+}
+
+func (o *WriteOptions) Context() context.Context {
+	if o != nil && o.ctx != nil {
+		return o.ctx
+	}
+	return context.Background()
+}
+
+func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {
+	o2 := new(WriteOptions)
+	if o != nil {
+		*o2 = *o
+	}
+	o2.ctx = ctx
+	return o2
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+	// LastIndex. This can be used as a WaitIndex to perform
+	// a blocking query
+	LastIndex uint64
+
+	// LastContentHash. This can be used as a WaitHash to perform a blocking query
+	// for endpoints that support hash-based blocking. Endpoints that do not
+	// support it will return an empty hash.
+	LastContentHash string
+
+	// Time of last contact from the leader for the
+	// server servicing the request
+	LastContact time.Duration
+
+	// Is there a known leader
+	KnownLeader bool
+
+	// How long did the request take
+	RequestTime time.Duration
+
+	// Is address translation enabled for HTTP responses on this agent
+	AddressTranslationEnabled bool
+
+	// CacheHit is true if the result was served from agent-local cache.
+	CacheHit bool
+
+	// CacheAge is set if request was ?cached and indicates how stale the cached
+	// response is.
+	CacheAge time.Duration
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate the HTTP client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+	// Username to use for HTTP Basic Authentication
+	Username string
+
+	// Password to use for HTTP Basic Authentication
+	Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+	// Address is the address of the Consul server
+	Address string
+
+	// Scheme is the URI scheme for the Consul server
+	Scheme string
+
+	// Datacenter to use. If not provided, the default agent datacenter is used.
+	Datacenter string
+
+	// Transport is the Transport to use for the http client.
+	Transport *http.Transport
+
+	// HttpClient is the client to use. Default will be
+	// used if not provided.
+	HttpClient *http.Client
+
+	// HttpAuth is the auth info to use for http access.
+	HttpAuth *HttpBasicAuth
+
+	// WaitTime limits how long a Watch will block. If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	TLSConfig TLSConfig
+}
+
+// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
+// Consul using TLS.
+type TLSConfig struct {
+	// Address is the optional address of the Consul server. The port, if any
+	// will be removed from here and this will be set to the ServerName of the
+	// resulting config.
+	Address string
+
+	// CAFile is the optional path to the CA certificate used for Consul
+	// communication, defaults to the system bundle if not specified.
+	CAFile string
+
+	// CAPath is the optional path to a directory of CA certificates to use for
+	// Consul communication, defaults to the system bundle if not specified.
+	CAPath string
+
+	// CertFile is the optional path to the certificate for Consul
+	// communication. If this is set then you need to also set KeyFile.
+	CertFile string
+
+	// KeyFile is the optional path to the private key for Consul communication.
+	// If this is set then you need to also set CertFile.
+	KeyFile string
+
+	// InsecureSkipVerify if set to true will disable TLS host verification.
+	InsecureSkipVerify bool
+}
+
+// DefaultConfig returns a default configuration for the client. By default this
+// will pool and reuse idle connections to Consul. If you have a long-lived
+// client object, this is the desired behavior and should make the most efficient
+// use of the connections to Consul. If you don't reuse a client object, which
+// is not recommended, then you may notice idle connections building up over
+// time. To avoid this, use the DefaultNonPooledConfig() instead.
+func DefaultConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultNonPooledConfig returns a default configuration for the client which
+// does not pool connections. This isn't a recommended configuration because it
+// will reconnect to Consul on every request, but this is useful to avoid the
+// accumulation of idle connections if you make many client objects during the
+// lifetime of your application.
+func DefaultNonPooledConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultTransport)
+}
+
+// defaultConfig returns the default configuration for the client, using the
+// given function to make the transport.
+func defaultConfig(transportFn func() *http.Transport) *Config {
+	config := &Config{
+		Address:   "127.0.0.1:8500",
+		Scheme:    "http",
+		Transport: transportFn(),
+	}
+
+	if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
+		config.Address = addr
+	}
+
+	if token := os.Getenv(HTTPTokenEnvName); token != "" {
+		config.Token = token
+	}
+
+	if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
+		var username, password string
+		if strings.Contains(auth, ":") {
+			split := strings.SplitN(auth, ":", 2)
+			username = split[0]
+			password = split[1]
+		} else {
+			username = auth
+		}
+
+		config.HttpAuth = &HttpBasicAuth{
+			Username: username,
+			Password: password,
+		}
+	}
+
+	if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
+		enabled, err := strconv.ParseBool(ssl)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
+		}
+
+		if enabled {
+			config.Scheme = "https"
+		}
+	}
+
+	if v := os.Getenv(HTTPTLSServerName); v != "" {
+		config.TLSConfig.Address = v
+	}
+	if v := os.Getenv(HTTPCAFile); v != "" {
+		config.TLSConfig.CAFile = v
+	}
+	if v := os.Getenv(HTTPCAPath); v != "" {
+		config.TLSConfig.CAPath = v
+	}
+	if v := os.Getenv(HTTPClientCert); v != "" {
+		config.TLSConfig.CertFile = v
+	}
+	if v := os.Getenv(HTTPClientKey); v != "" {
+		config.TLSConfig.KeyFile = v
+	}
+	if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
+		doVerify, err := strconv.ParseBool(v)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
+		}
+		if !doVerify {
+			config.TLSConfig.InsecureSkipVerify = true
+		}
+	}
+
+	return config
+}
+
+// SetupTLSConfig is used to generate a *tls.Config that's useful for talking to
+// Consul using TLS.
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
+	tlsClientConfig := &tls.Config{
+		InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
+	}
+
+	if tlsConfig.Address != "" {
+		server := tlsConfig.Address
+		hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
+		if hasPort {
+			var err error
+			server, _, err = net.SplitHostPort(server)
+			if err != nil {
+				return nil, err
+			}
+		}
+		tlsClientConfig.ServerName = server
+	}
+
+	if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
+		tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
+		if err != nil {
+			return nil, err
+		}
+		tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
+	}
+
+	if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" {
+		rootConfig := &rootcerts.Config{
+			CAFile: tlsConfig.CAFile,
+			CAPath: tlsConfig.CAPath,
+		}
+		if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
+			return nil, err
+		}
+	}
+
+	return tlsClientConfig, nil
+}
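+
+// An illustrative sketch: building a *tls.Config for a custom transport
+// from the same TLSConfig type the client uses. The address and CA file
+// path are hypothetical.
+//
+//	tlsClientConf, err := SetupTLSConfig(&TLSConfig{
+//		Address: "consul.example.com:8501",
+//		CAFile:  "/etc/consul/ca.pem",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	transport := cleanhttp.DefaultPooledTransport()
+//	transport.TLSClientConfig = tlsClientConf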
+
+func (c *Config) GenerateEnv() []string {
+	env := make([]string, 0, 10)
+
+	env = append(env,
+		fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
+		fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
+		fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
+		fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
+		fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
+		fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile),
+		fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile),
+		fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address),
+		fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify))
+
+	if c.HttpAuth != nil {
+		env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password))
+	} else {
+		env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName))
+	}
+
+	return env
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+	config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+	// bootstrap the config
+	defConfig := DefaultConfig()
+
+	if len(config.Address) == 0 {
+		config.Address = defConfig.Address
+	}
+
+	if len(config.Scheme) == 0 {
+		config.Scheme = defConfig.Scheme
+	}
+
+	if config.Transport == nil {
+		config.Transport = defConfig.Transport
+	}
+
+	if config.TLSConfig.Address == "" {
+		config.TLSConfig.Address = defConfig.TLSConfig.Address
+	}
+
+	if config.TLSConfig.CAFile == "" {
+		config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
+	}
+
+	if config.TLSConfig.CAPath == "" {
+		config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
+	}
+
+	if config.TLSConfig.CertFile == "" {
+		config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
+	}
+
+	if config.TLSConfig.KeyFile == "" {
+		config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile
+	}
+
+	if !config.TLSConfig.InsecureSkipVerify {
+		config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
+	}
+
+	if config.HttpClient == nil {
+		var err error
+		config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	parts := strings.SplitN(config.Address, "://", 2)
+	if len(parts) == 2 {
+		switch parts[0] {
+		case "http":
+			config.Scheme = "http"
+		case "https":
+			config.Scheme = "https"
+		case "unix":
+			trans := cleanhttp.DefaultTransport()
+			trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
+				return net.Dial("unix", parts[1])
+			}
+			config.HttpClient = &http.Client{
+				Transport: trans,
+			}
+		default:
+			return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
+		}
+		config.Address = parts[1]
+	}
+
+	if config.Token == "" {
+		config.Token = defConfig.Token
+	}
+
+	return &Client{config: *config}, nil
+}
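+
+// An illustrative sketch: the Address may carry an explicit scheme,
+// including "unix://" for agents listening on a unix socket. The socket
+// path is hypothetical.
+//
+//	client, err := NewClient(&Config{
+//		Address: "unix:///var/run/consul.sock",
+//	})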
+
+// NewHttpClient returns an http client configured with the given Transport and TLS
+// config.
+func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
+	client := &http.Client{
+		Transport: transport,
+	}
+
+	// TODO (slackpad) - Once we get some run time on the HTTP/2 support we
+	// should turn it on by default if TLS is enabled. We would basically
+	// just need to call http2.ConfigureTransport(transport) here. We also
+	// don't want to introduce another external dependency on
+	// golang.org/x/net/http2 at this time. For a complete recipe for how
+	// to enable HTTP/2 support on a transport suitable for the API client
+	// library see agent/http_test.go:TestHTTPServer_H2.
+
+	if transport.TLSClientConfig == nil {
+		tlsClientConfig, err := SetupTLSConfig(&tlsConf)
+
+		if err != nil {
+			return nil, err
+		}
+
+		transport.TLSClientConfig = tlsClientConfig
+	}
+
+	return client, nil
+}
+
+// request is used to help build up a request
+type request struct {
+	config *Config
+	method string
+	url    *url.URL
+	params url.Values
+	body   io.Reader
+	header http.Header
+	obj    interface{}
+	ctx    context.Context
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.AllowStale {
+		r.params.Set("stale", "")
+	}
+	if q.RequireConsistent {
+		r.params.Set("consistent", "")
+	}
+	if q.WaitIndex != 0 {
+		r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+	}
+	if q.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(q.WaitTime))
+	}
+	if q.WaitHash != "" {
+		r.params.Set("hash", q.WaitHash)
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+	if q.Near != "" {
+		r.params.Set("near", q.Near)
+	}
+	if len(q.NodeMeta) > 0 {
+		for key, value := range q.NodeMeta {
+			r.params.Add("node-meta", key+":"+value)
+		}
+	}
+	if q.RelayFactor != 0 {
+		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+	}
+	if q.Connect {
+		r.params.Set("connect", "true")
+	}
+	if q.UseCache && !q.RequireConsistent {
+		r.params.Set("cached", "")
+
+		cc := []string{}
+		if q.MaxAge > 0 {
+			cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
+		}
+		if q.StaleIfError > 0 {
+			cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
+		}
+		if len(cc) > 0 {
+			r.header.Set("Cache-Control", strings.Join(cc, ", "))
+		}
+	}
+	r.ctx = q.ctx
+}
+
+// durToMsec converts a duration to a millisecond specified string. If the
+// user selected a positive value that rounds to 0 ms, then we will use 1 ms
+// so they get a short delay, otherwise Consul will translate the 0 ms into
+// a huge default delay.
+func durToMsec(dur time.Duration) string {
+	ms := dur / time.Millisecond
+	if dur > 0 && ms == 0 {
+		ms = 1
+	}
+	return fmt.Sprintf("%dms", ms)
+}
+
+// serverError is a string we look for to detect 500 errors.
+const serverError = "Unexpected response code: 500"
+
+// IsRetryableError returns true for 500 errors from the Consul servers, and
+// network connection errors. These are usually retryable at a later time.
+// This applies to reads but NOT to writes. This may return true for errors
+// on writes that may have still gone through, so do not use this to retry
+// any write operations.
+func IsRetryableError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if _, ok := err.(net.Error); ok {
+		return true
+	}
+
+	// TODO (slackpad) - Make a real error type here instead of using
+	// a string check.
+	return strings.Contains(err.Error(), serverError)
+}
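+
+// An illustrative sketch: a naive read retry loop. As documented above,
+// this must not be used for writes, which may have gone through even when
+// an error is returned.
+//
+//	var (
+//		services map[string]*AgentService
+//		err      error
+//	)
+//	for attempt := 0; attempt < 3; attempt++ {
+//		services, err = client.Agent().Services()
+//		if err == nil || !IsRetryableError(err) {
+//			break
+//		}
+//		time.Sleep(time.Second)
+//	}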
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+	if q.RelayFactor != 0 {
+		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+	}
+	r.ctx = q.ctx
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		b, err := encodeBody(r.obj)
+		if err != nil {
+			return nil, err
+		}
+		r.body = b
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.Host = r.url.Host
+	req.URL.Scheme = r.url.Scheme
+	req.Host = r.url.Host
+	req.Header = r.header
+
+	// Setup auth
+	if r.config.HttpAuth != nil {
+		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+	}
+	if r.ctx != nil {
+		return req.WithContext(r.ctx), nil
+	}
+
+	return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+	r := &request{
+		config: &c.config,
+		method: method,
+		url: &url.URL{
+			Scheme: c.config.Scheme,
+			Host:   c.config.Address,
+			Path:   path,
+		},
+		params: make(map[string][]string),
+		header: make(http.Header),
+	}
+	if c.config.Datacenter != "" {
+		r.params.Set("dc", c.config.Datacenter)
+	}
+	if c.config.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(r.config.WaitTime))
+	}
+	if c.config.Token != "" {
+		r.header.Set("X-Consul-Token", r.config.Token)
+	}
+	return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+	req, err := r.toHTTP()
+	if err != nil {
+		return 0, nil, err
+	}
+	start := time.Now()
+	resp, err := c.config.HttpClient.Do(req)
+	diff := time.Since(start)
+	return diff, resp, err
+}
+
+// query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	r := c.newRequest("GET", endpoint)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if err := decodeBody(resp, out); err != nil {
+		return nil, err
+	}
+	return qm, nil
+}
+
+// write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	r := c.newRequest("PUT", endpoint)
+	r.setWriteOptions(q)
+	r.obj = in
+	rtt, resp, err := requireOK(c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	if out != nil {
+		if err := decodeBody(resp, &out); err != nil {
+			return nil, err
+		}
+	} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
+		return nil, err
+	}
+	return wm, nil
+}
+
+// parseQueryMeta is used to help parse query meta-data
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+	header := resp.Header
+
+	// Parse the X-Consul-Index (if it's set - hash based blocking queries don't
+	// set this)
+	if indexStr := header.Get("X-Consul-Index"); indexStr != "" {
+		index, err := strconv.ParseUint(indexStr, 10, 64)
+		if err != nil {
+			return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+		}
+		q.LastIndex = index
+	}
+	q.LastContentHash = header.Get("X-Consul-ContentHash")
+
+	// Parse the X-Consul-LastContact
+	last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+	}
+	q.LastContact = time.Duration(last) * time.Millisecond
+
+	// Parse the X-Consul-KnownLeader
+	switch header.Get("X-Consul-KnownLeader") {
+	case "true":
+		q.KnownLeader = true
+	default:
+		q.KnownLeader = false
+	}
+
+	// Parse X-Consul-Translate-Addresses
+	switch header.Get("X-Consul-Translate-Addresses") {
+	case "true":
+		q.AddressTranslationEnabled = true
+	default:
+		q.AddressTranslationEnabled = false
+	}
+
+	// Parse Cache info
+	if cacheStr := header.Get("X-Cache"); cacheStr != "" {
+		q.CacheHit = strings.EqualFold(cacheStr, "HIT")
+	}
+	if ageStr := header.Get("Age"); ageStr != "" {
+		age, err := strconv.ParseUint(ageStr, 10, 64)
+		if err != nil {
+			return fmt.Errorf("Failed to parse Age Header: %v", err)
+		}
+		q.CacheAge = time.Duration(age) * time.Second
+	}
+
+	return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	dec := json.NewDecoder(resp.Body)
+	return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(obj); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+	if e != nil {
+		if resp != nil {
+			resp.Body.Close()
+		}
+		return d, nil, e
+	}
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		resp.Body.Close()
+		return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+	return d, resp, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go
new file mode 100644
index 0000000..c175c3f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/catalog.go
@@ -0,0 +1,244 @@
+package api
+
+type Weights struct {
+	Passing int
+	Warning int
+}
+
+type Node struct {
+	ID              string
+	Node            string
+	Address         string
+	Datacenter      string
+	TaggedAddresses map[string]string
+	Meta            map[string]string
+	CreateIndex     uint64
+	ModifyIndex     uint64
+}
+
+type CatalogService struct {
+	ID                       string
+	Node                     string
+	Address                  string
+	Datacenter               string
+	TaggedAddresses          map[string]string
+	NodeMeta                 map[string]string
+	ServiceID                string
+	ServiceName              string
+	ServiceAddress           string
+	ServiceTags              []string
+	ServiceMeta              map[string]string
+	ServicePort              int
+	ServiceWeights           Weights
+	ServiceEnableTagOverride bool
+	// DEPRECATED (ProxyDestination) - ServiceProxyDestination was never added
+	// here, so there is no old field to deprecate; ServiceProxy replaces it.
+	ServiceProxy *AgentServiceConnectProxyConfig
+	CreateIndex  uint64
+	Checks       HealthChecks
+	ModifyIndex  uint64
+}
+
+type CatalogNode struct {
+	Node     *Node
+	Services map[string]*AgentService
+}
+
+type CatalogRegistration struct {
+	ID              string
+	Node            string
+	Address         string
+	TaggedAddresses map[string]string
+	NodeMeta        map[string]string
+	Datacenter      string
+	Service         *AgentService
+	Check           *AgentCheck
+	Checks          HealthChecks
+	SkipNodeUpdate  bool
+}
+
+type CatalogDeregistration struct {
+	Node       string
+	Address    string // Obsolete.
+	Datacenter string
+	ServiceID  string
+	CheckID    string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+	c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+	return &Catalog{c}
+}
+
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/register")
+	r.setWriteOptions(q)
+	r.obj = reg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+	r.setWriteOptions(q)
+	r.obj = dereg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Node
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/services")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return c.service(service, tags, q, false)
+}
+
+// ServiceMultipleTags queries catalog entries for a given service,
+// supporting multiple tags for filtering.
+func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	return c.service(service, tags, q, false)
+}
+
+// Connect is used to query catalog entries for a given Connect-enabled service
+func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return c.service(service, tags, q, true)
+}
+
+// ConnectMultipleTags queries catalog entries for a given Connect-enabled
+// service, supporting multiple tags for filtering.
+func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	return c.service(service, tags, q, true)
+}
+
+func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
+	path := "/v1/catalog/service/" + service
+	if connect {
+		path = "/v1/catalog/connect/" + service
+	}
+	r := c.c.newRequest("GET", path)
+	r.setQueryOptions(q)
+	if len(tags) > 0 {
+		for _, tag := range tags {
+			r.params.Add("tag", tag)
+		}
+	}
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CatalogService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *CatalogNode
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
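+
+// Example usage (a sketch, assuming client is an initialized *Client against
+// a reachable agent, and a hypothetical "web" service; not part of the
+// upstream file):
+//
+//	svcs, _, err := client.Catalog().Service("web", "", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, s := range svcs {
+//		fmt.Printf("%s at %s:%d\n", s.Node, s.ServiceAddress, s.ServicePort)
+//	}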
diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go
new file mode 100644
index 0000000..a40d1e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect.go
@@ -0,0 +1,12 @@
+package api
+
+// Connect can be used to work with endpoints related to Connect, the
+// feature for securely connecting services within Consul.
+type Connect struct {
+	c *Client
+}
+
+// Connect returns a handle to the connect-related endpoints
+func (c *Client) Connect() *Connect {
+	return &Connect{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go
new file mode 100644
index 0000000..a863d21
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go
@@ -0,0 +1,172 @@
+package api
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// CAConfig is the structure for the Connect CA configuration.
+type CAConfig struct {
+	// Provider is the CA provider implementation to use.
+	Provider string
+
+	// Configuration is arbitrary configuration for the provider. This
+	// should only contain primitive values and containers (such as lists
+	// and maps).
+	Config map[string]interface{}
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// CommonCAProviderConfig is the common options available to all CA providers.
+type CommonCAProviderConfig struct {
+	LeafCertTTL time.Duration
+}
+
+// ConsulCAProviderConfig is the config for the built-in Consul CA provider.
+type ConsulCAProviderConfig struct {
+	CommonCAProviderConfig `mapstructure:",squash"`
+
+	PrivateKey     string
+	RootCert       string
+	RotationPeriod time.Duration
+}
+
+// ParseConsulCAConfig takes a raw config map and returns a parsed
+// ConsulCAProviderConfig.
+func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) {
+	var config ConsulCAProviderConfig
+	decodeConf := &mapstructure.DecoderConfig{
+		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
+		ErrorUnused:      true,
+		Result:           &config,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := mapstructure.NewDecoder(decodeConf)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := decoder.Decode(raw); err != nil {
+		return nil, fmt.Errorf("error decoding config: %s", err)
+	}
+
+	return &config, nil
+}
+
+// CARootList is the structure for the results of listing roots.
+type CARootList struct {
+	ActiveRootID string
+	TrustDomain  string
+	Roots        []*CARoot
+}
+
+// CARoot represents a root CA certificate that is trusted.
+type CARoot struct {
+	// ID is a globally unique ID (UUID) representing this CA root.
+	ID string
+
+	// Name is a human-friendly name for this CA root. This value is
+	// opaque to Consul and is not used for anything internally.
+	Name string
+
+	// RootCertPEM is the PEM-encoded public certificate.
+	RootCertPEM string `json:"RootCert"`
+
+	// Active is true if this is the current active CA. This must only
+	// be true for exactly one CA. For any method that modifies roots in the
+	// state store, tests should be written to verify that multiple roots
+	// cannot be active.
+	Active bool
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// LeafCert is a certificate that has been issued by a Connect CA.
+type LeafCert struct {
+	// SerialNumber is the unique serial number for this certificate.
+	// This is encoded in standard hex separated by :.
+	SerialNumber string
+
+	// CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private
+	// key for that cert, respectively. This should not be stored in the
+	// state store, but is present in the sign API response.
+	CertPEM       string `json:",omitempty"`
+	PrivateKeyPEM string `json:",omitempty"`
+
+	// Service is the name of the service for which the cert was issued.
+	// ServiceURI is the cert URI value.
+	Service    string
+	ServiceURI string
+
+	// ValidAfter and ValidBefore are the validity periods for the
+	// certificate.
+	ValidAfter  time.Time
+	ValidBefore time.Time
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// CARoots queries the list of available roots.
+func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/ca/roots")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out CARootList
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+// CAGetConfig returns the current CA configuration.
+func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/ca/configuration")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out CAConfig
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+// CASetConfig sets the current CA configuration.
+func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) {
+	r := h.c.newRequest("PUT", "/v1/connect/ca/configuration")
+	r.setWriteOptions(q)
+	r.obj = conf
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go
new file mode 100644
index 0000000..a996c03
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go
@@ -0,0 +1,302 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"time"
+)
+
+// Intention defines an intention for the Connect Service Graph. This defines
+// the allowed or denied behavior of a connection between two services using
+// Connect.
+type Intention struct {
+	// ID is the UUID-based ID for the intention, always generated by Consul.
+	ID string
+
+	// Description is a human-friendly description of this intention.
+	// It is opaque to Consul and is only stored and transferred in API
+	// requests.
+	Description string
+
+	// SourceNS, SourceName are the namespace and name, respectively, of
+	// the source service. Either of these may be the wildcard "*", but only
+	// the full value can be a wildcard. Partial wildcards are not allowed.
+	// The source may also be a non-Consul service, as specified by SourceType.
+	//
+	// DestinationNS, DestinationName is the same, but for the destination
+	// service. The same rules apply. The destination is always a Consul
+	// service.
+	SourceNS, SourceName           string
+	DestinationNS, DestinationName string
+
+	// SourceType is the type of the value for the source.
+	SourceType IntentionSourceType
+
+	// Action is whether this is a whitelist or blacklist intention.
+	Action IntentionAction
+
+	// DefaultAddr, DefaultPort of the local listening proxy (if any) to
+	// make this connection.
+	DefaultAddr string
+	DefaultPort int
+
+	// Meta is arbitrary metadata associated with the intention. This is
+	// opaque to Consul but is served in API responses.
+	Meta map[string]string
+
+	// Precedence is the order that the intention will be applied, with
+	// larger numbers being applied first. This is a read-only field, on
+	// any intention update it is updated.
+	Precedence int
+
+	// CreatedAt and UpdatedAt keep track of when this record was created
+	// or modified.
+	CreatedAt, UpdatedAt time.Time
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// String returns human-friendly output describing this intention.
+func (i *Intention) String() string {
+	return fmt.Sprintf("%s => %s (%s)",
+		i.SourceString(),
+		i.DestinationString(),
+		i.Action)
+}
+
+// SourceString returns the namespace/name format for the source, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) SourceString() string {
+	return i.partString(i.SourceNS, i.SourceName)
+}
+
+// DestinationString returns the namespace/name format for the destination, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) DestinationString() string {
+	return i.partString(i.DestinationNS, i.DestinationName)
+}
+
+func (i *Intention) partString(ns, n string) string {
+	// For now we omit the default namespace from the output. In the future
+	// we might want to look at this and show this in a multi-namespace world.
+	if ns != "" && ns != IntentionDefaultNamespace {
+		n = ns + "/" + n
+	}
+
+	return n
+}
+
+// IntentionDefaultNamespace is the default namespace value.
+const IntentionDefaultNamespace = "default"
+
+// IntentionAction is the action that the intention represents. This
+// can be "allow" or "deny" to whitelist or blacklist intentions.
+type IntentionAction string
+
+const (
+	IntentionActionAllow IntentionAction = "allow"
+	IntentionActionDeny  IntentionAction = "deny"
+)
+
+// IntentionSourceType is the type of the source within an intention.
+type IntentionSourceType string
+
+const (
+	// IntentionSourceConsul is a service within the Consul catalog.
+	IntentionSourceConsul IntentionSourceType = "consul"
+)
+
+// IntentionMatch holds the arguments for the intention match API.
+type IntentionMatch struct {
+	By    IntentionMatchType
+	Names []string
+}
+
+// IntentionMatchType is the target for a match request. For example,
+// matching by source will look for all intentions that match the given
+// source value.
+type IntentionMatchType string
+
+const (
+	IntentionMatchSource      IntentionMatchType = "source"
+	IntentionMatchDestination IntentionMatchType = "destination"
+)
+
+// IntentionCheck holds the arguments for the intention check API. For
+// more documentation see the IntentionCheck function.
+type IntentionCheck struct {
+	// Source and Destination are the source and destination values to
+	// check. The destination is always a Consul service, but the source
+	// may be other values as defined by the SourceType.
+	Source, Destination string
+
+	// SourceType is the type of the value for the source.
+	SourceType IntentionSourceType
+}
+
+// Intentions returns the list of intentions.
+func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Intention
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// IntentionGet retrieves a single intention.
+func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := h.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		return nil, nil, fmt.Errorf(
+			"Unexpected response %d: %s", resp.StatusCode, buf.String())
+	}
+
+	var out Intention
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return &out, qm, nil
+}
+
+// IntentionDelete deletes a single intention.
+func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	return qm, nil
+}
+
+// IntentionMatch returns the list of intentions that match a given source
+// or destination. The returned intentions are ordered by precedence where
+// result[0] is the highest precedence (if that matches, then that rule overrides
+// all other rules).
+//
+// Matching can be done for multiple names at the same time. The resulting
+// map is keyed by the given names. Casing is preserved.
+func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions/match")
+	r.setQueryOptions(q)
+	r.params.Set("by", string(args.By))
+	for _, name := range args.Names {
+		r.params.Add("name", name)
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]*Intention
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
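+
+// Illustrative call (a sketch with a hypothetical "db" service; not part of
+// the upstream file): list every intention whose destination is "db",
+// highest precedence first.
+//
+//	matches, _, err := client.Connect().IntentionMatch(&IntentionMatch{
+//		By:    IntentionMatchDestination,
+//		Names: []string{"db"},
+//	}, nil)
+//	if err == nil {
+//		for _, ixn := range matches["db"] {
+//			fmt.Println(ixn.String())
+//		}
+//	}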
+
+// IntentionCheck returns whether a given source/destination would be allowed
+// or not given the current set of intentions and the configuration of Consul.
+func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/connect/intentions/check")
+	r.setQueryOptions(q)
+	r.params.Set("source", args.Source)
+	r.params.Set("destination", args.Destination)
+	if args.SourceType != "" {
+		r.params.Set("source-type", string(args.SourceType))
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out struct{ Allowed bool }
+	if err := decodeBody(resp, &out); err != nil {
+		return false, nil, err
+	}
+	return out.Allowed, qm, nil
+}
+
+// IntentionCreate will create a new intention. The ID in the given
+// structure must be empty and a generated ID will be returned on
+// success.
+func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
+	r := c.c.newRequest("POST", "/v1/connect/intentions")
+	r.setWriteOptions(q)
+	r.obj = ixn
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
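+
+// Illustrative call (a sketch with hypothetical "web" and "db" services; not
+// part of the upstream file): allow "web" to connect to "db". The ID field
+// is left empty as required.
+//
+//	id, _, err := client.Connect().IntentionCreate(&Intention{
+//		SourceName:      "web",
+//		DestinationName: "db",
+//		Action:          IntentionActionAllow,
+//	}, nil)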
+
+// IntentionUpdate will update an existing intention. The ID in the given
+// structure must be non-empty.
+func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
+	r.setWriteOptions(q)
+	r.obj = ixn
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
new file mode 100644
index 0000000..53318f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/coordinate.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+	"github.com/hashicorp/serf/coordinate"
+)
+
+// CoordinateEntry represents a node and its associated network coordinate.
+type CoordinateEntry struct {
+	Node    string
+	Segment string
+	Coord   *coordinate.Coordinate
+}
+
+// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
+// and area. Network coordinates are only compatible within the same area.
+type CoordinateDatacenterMap struct {
+	Datacenter  string
+	AreaID      string
+	Coordinates []CoordinateEntry
+}
+
+// Coordinate can be used to query the coordinate endpoints
+type Coordinate struct {
+	c *Client
+}
+
+// Coordinate returns a handle to the coordinate endpoints
+func (c *Client) Coordinate() *Coordinate {
+	return &Coordinate{c}
+}
+
+// Datacenters is used to return the coordinates of all the servers in the WAN
+// pool.
+func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*CoordinateDatacenterMap
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Update inserts or updates the LAN coordinate of a node.
+func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/coordinate/update")
+	r.setWriteOptions(q)
+	r.obj = coord
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Node is used to return the coordinates of a single node in the LAN pool.
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
new file mode 100644
index 0000000..2380468
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/debug.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+)
+
+// Debug can be used to query the /debug/pprof endpoints to gather
+// profiling information about the target agent.
+//
+// The agent must have enable_debug set to true for profiling to be enabled
+// and for these endpoints to function.
+type Debug struct {
+	c *Client
+}
+
+// Debug returns a handle that exposes the internal debug endpoints.
+func (c *Client) Debug() *Debug {
+	return &Debug{c}
+}
+
+// Heap returns a pprof heap dump
+func (d *Debug) Heap() ([]byte, error) {
+	r := d.c.newRequest("GET", "/debug/pprof/heap")
+	_, resp, err := d.c.doRequest(r)
+	if err != nil {
+		return nil, fmt.Errorf("error making request: %s", err)
+	}
+	defer resp.Body.Close()
+
+	// We return a raw response because we're just passing through a response
+	// from the pprof handlers
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding body: %s", err)
+	}
+
+	return body, nil
+}
+
+// Profile returns a pprof CPU profile for the specified number of seconds
+func (d *Debug) Profile(seconds int) ([]byte, error) {
+	r := d.c.newRequest("GET", "/debug/pprof/profile")
+
+	// Capture a profile for the specified number of seconds
+	r.params.Set("seconds", strconv.Itoa(seconds))
+
+	_, resp, err := d.c.doRequest(r)
+	if err != nil {
+		return nil, fmt.Errorf("error making request: %s", err)
+	}
+	defer resp.Body.Close()
+
+	// We return a raw response because we're just passing through a response
+	// from the pprof handlers
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding body: %s", err)
+	}
+
+	return body, nil
+}
+
+// Trace returns an execution trace
+func (d *Debug) Trace(seconds int) ([]byte, error) {
+	r := d.c.newRequest("GET", "/debug/pprof/trace")
+
+	// Capture a trace for the specified number of seconds
+	r.params.Set("seconds", strconv.Itoa(seconds))
+
+	_, resp, err := d.c.doRequest(r)
+	if err != nil {
+		return nil, fmt.Errorf("error making request: %s", err)
+	}
+	defer resp.Body.Close()
+
+	// We return a raw response because we're just passing through a response
+	// from the pprof handlers
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding body: %s", err)
+	}
+
+	return body, nil
+}
+
+// Goroutine returns a pprof goroutine profile
+func (d *Debug) Goroutine() ([]byte, error) {
+	r := d.c.newRequest("GET", "/debug/pprof/goroutine")
+
+	_, resp, err := d.c.doRequest(r)
+	if err != nil {
+		return nil, fmt.Errorf("error making request: %s", err)
+	}
+	defer resp.Body.Close()
+
+	// We return a raw response because we're just passing through a response
+	// from the pprof handlers
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding body: %s", err)
+	}
+
+	return body, nil
+}
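+
+// Example usage (a sketch, assuming the agent was started with enable_debug
+// set to true; the output file name is hypothetical; not part of the
+// upstream file): capture a 10-second CPU profile for `go tool pprof`.
+//
+//	profile, err := client.Debug().Profile(10)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = ioutil.WriteFile("consul.prof", profile, 0644)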
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
new file mode 100644
index 0000000..85b5b06
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/event.go
@@ -0,0 +1,104 @@
+package api
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+	c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+	ID            string
+	Name          string
+	Payload       []byte
+	NodeFilter    string
+	ServiceFilter string
+	TagFilter     string
+	Version       int
+	LTime         uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+	return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
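+
+// Example usage (a sketch with a hypothetical "deploy" event; not part of the
+// upstream file): fire an event, then convert its ID into a WaitIndex so a
+// subsequent List blocks until newer events arrive.
+//
+//	id, _, err := client.Event().Fire(&UserEvent{Name: "deploy"}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	q := &QueryOptions{WaitIndex: client.Event().IDToIndex(id)}
+//	events, _, err := client.Event().List("deploy", q)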
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 0000000..eae6a01
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,250 @@
+package api
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	// HealthAny is special, and is used as a wild card,
+	// not as a specific state.
+	HealthAny      = "any"
+	HealthPassing  = "passing"
+	HealthWarning  = "warning"
+	HealthCritical = "critical"
+	HealthMaint    = "maintenance"
+)
+
+const (
+	// NodeMaint is the special key set by a node in maintenance mode.
+	NodeMaint = "_node_maintenance"
+
+	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
+	ServiceMaintPrefix = "_service_maintenance:"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+	ServiceTags []string
+
+	Definition HealthCheckDefinition
+}
+
+// HealthCheckDefinition is used to store the details about
+// a health check's execution.
+type HealthCheckDefinition struct {
+	HTTP                           string
+	Header                         map[string][]string
+	Method                         string
+	TLSSkipVerify                  bool
+	TCP                            string
+	Interval                       ReadableDuration
+	Timeout                        ReadableDuration
+	DeregisterCriticalServiceAfter ReadableDuration
+}
+
+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+//  maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
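+
+// Worked example (hypothetical checks, not part of the upstream file): a
+// warning check alongside a passing one aggregates to "warning", since
+// warning outranks passing in the heuristic above.
+//
+//	checks := HealthChecks{
+//		&HealthCheck{CheckID: "svc:web", Status: HealthWarning},
+//		&HealthCheck{CheckID: "serfHealth", Status: HealthPassing},
+//	}
+//	checks.AggregatedStatus() // "warning"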
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+// Connect is equivalent to Service except that it will only return services
+// which are Connect-enabled and will return the connection address for Connect
+// clients to use, which may be a proxy in front of the named service. If
+// passingOnly is true only instances where both the service and any proxy are
+// healthy will be returned.
+func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, true)
+}
+
+func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, true)
+}
+
+func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
+	path := "/v1/health/service/" + service
+	if connect {
+		path = "/v1/health/connect/" + service
+	}
+	r := h.c.newRequest("GET", path)
+	r.setQueryOptions(q)
+	if len(tags) > 0 {
+		for _, tag := range tags {
+			r.params.Add("tag", tag)
+		}
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
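+
+// Illustrative call (a sketch, assuming client is an initialized *Client and
+// a hypothetical "web" service; not part of the upstream file): fetch only
+// instances whose checks all pass.
+//
+//	entries, _, err := client.Health().Service("web", "", true, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, e := range entries {
+//		fmt.Println(e.Node.Node, e.Node.Address)
+//	}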
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	switch state {
+	case HealthAny:
+	case HealthWarning:
+	case HealthCritical:
+	case HealthPassing:
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 0000000..97f5156
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,420 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	// Key is the name of the key. It is also part of the URL path when accessed
+	// via the API.
+	Key string
+
+	// CreateIndex holds the index corresponding the creation of this KVPair. This
+	// is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// LockIndex holds the index corresponding to a lock on this key, if any. This
+	// is a read-only field.
+	LockIndex uint64
+
+	// Flags are any user-defined flags on the key. It is up to the implementer
+	// to check these values, since Consul does not treat them specially.
+	Flags uint64
+
+	// Value is the value for the key. This can be any value, but it will be
+	// base64 encoded upon transport.
+	Value []byte
+
+	// Session is a string representing the ID of the session. Any other
+	// interactions with this key over the same session must specify the same
+	// session ID.
+	Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KVOp constants give possible operations available in a KVTxn.
+type KVOp string
+
+const (
+	KVSet            KVOp = "set"
+	KVDelete         KVOp = "delete"
+	KVDeleteCAS      KVOp = "delete-cas"
+	KVDeleteTree     KVOp = "delete-tree"
+	KVCAS            KVOp = "cas"
+	KVLock           KVOp = "lock"
+	KVUnlock         KVOp = "unlock"
+	KVGet            KVOp = "get"
+	KVGetTree        KVOp = "get-tree"
+	KVCheckSession   KVOp = "check-session"
+	KVCheckIndex     KVOp = "check-index"
+	KVCheckNotExists KVOp = "check-not-exists"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
+type KVTxnOp struct {
+	Verb    KVOp
+	Key     string
+	Value   []byte
+	Flags   uint64
+	Index   uint64
+	Session string
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+	Results []*KVPair
+	Errors  TxnErrors
+}
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV is used to return a handle to the K/V apis
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
+
+// Get is used to lookup a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
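+
+// Illustrative use (a sketch with a hypothetical key; not part of the
+// upstream file): a nil pair with a nil error means the key is absent, which
+// is distinct from a failed request.
+//
+//	pair, _, err := client.KV().Get("config/feature", nil)
+//	switch {
+//	case err != nil:
+//		// request failed
+//	case pair == nil:
+//		// key does not exist
+//	default:
+//		fmt.Printf("%s = %s\n", pair.Key, pair.Value)
+//	}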
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
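+
+// Illustrative check-and-set loop (a sketch with a hypothetical key; not part
+// of the upstream file): read a key, mutate it, and write back only if the
+// ModifyIndex is unchanged.
+//
+//	kv := client.KV()
+//	pair, _, err := kv.Get("counters/a", nil)
+//	if err == nil && pair != nil {
+//		pair.Value = []byte("updated")
+//		ok, _, err := kv.CAS(pair, nil)
+//		// ok == false means another writer raced us; re-read and retry
+//		_, _ = ok, err
+//	}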
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["release"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+	if len(key) > 0 && key[0] == '/' {
+		return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
+	}
+
+	r := k.c.newRequest("PUT", "/v1/kv/"+key)
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	r.body = bytes.NewReader(body)
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(key, nil, w)
+	return qm, err
+}
+
+// DeleteCAS is used for a Delete Check-And-Set operation. The Key
+// and ModifyIndex are respected. Returns true on success or false on failures.
+func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := map[string]string{
+		"cas": strconv.FormatUint(p.ModifyIndex, 10),
+	}
+	return k.deleteInternal(p.Key, params, q)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
+	return qm, err
+}
+
+func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
+	r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// TxnOp is the internal format we send to Consul. It's not specific to KV,
+// though currently only KV operations are supported.
+type TxnOp struct {
+	KV *KVTxnOp
+}
+
+// TxnOps is a list of transaction operations.
+type TxnOps []*TxnOp
+
+// TxnResult is the internal format we receive from Consul.
+type TxnResult struct {
+	KV *KVPair
+}
+
+// TxnResults is a list of TxnResult objects.
+type TxnResults []*TxnResult
+
+// TxnError is used to return information about an operation in a transaction.
+type TxnError struct {
+	OpIndex int
+	What    string
+}
+
+// TxnErrors is a list of TxnError objects.
+type TxnErrors []*TxnError
+
+// TxnResponse is the internal format we receive from Consul.
+type TxnResponse struct {
+	Results TxnResults
+	Errors  TxnErrors
+}
+
+// Txn is used to apply multiple KV operations in a single, atomic transaction.
+//
+// Note that Go will perform the required base64 encoding on the values
+// automatically because the type is a byte slice. Transactions are defined as a
+// list of operations to perform, using the KVOp constants and KVTxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store. Note that this hides the internal raw transaction interface
+// and munges the input and output types into KV-specific ones for ease of use.
+// If there are more non-KV operations in the future we may break out a new
+// transaction API client, but it will be easy to keep this KV-specific variant
+// supported.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+//	   ops := KVTxnOps{
+//		   &KVTxnOp{
+//			   Verb:    KVLock,
+//			   Key:     "test/lock",
+//			   Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//			   Value:   []byte("hello"),
+//		   },
+//		   &KVTxnOp{
+//			   Verb:    KVGet,
+//			   Key:     "another/key",
+//		   },
+//	   }
+//	   ok, response, _, err := kv.Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. Deleted keys will have a nil entry in the results, and to save
+// space, the Value of each key in the Results will be nil unless the operation
+// is a KVGet. If the transaction was rolled back, the Errors member will have
+// entries referencing the index of the operation that failed along with an error
+// message.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+	r := k.c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	// Convert into the internal format since this is an all-KV txn.
+	ops := make(TxnOps, 0, len(txn))
+	for _, kvOp := range txn {
+		ops = append(ops, &TxnOp{KV: kvOp})
+	}
+	r.obj = ops
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		// Convert from the internal format.
+		kvResp := KVTxnResponse{
+			Errors: txnResp.Errors,
+		}
+		for _, result := range txnResp.Results {
+			kvResp.Results = append(kvResp.Results, result.KV)
+		}
+		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 0000000..82339cb
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,386 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+	Key              string        // Must be set and have write permissions
+	Value            []byte        // Optional, value to associate with the lock
+	Session          string        // Optional, created if not specified
+	SessionOpts      *SessionEntry // Optional, options to use when creating a session
+	SessionName      string        // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
+	SessionTTL       string        // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
+	MonitorRetries   int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	LockWaitTime     time.Duration // Optional, defaults to DefaultLockWaitTime
+	LockTryOnce      bool          // Optional, defaults to false which means try forever
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+	opts := &LockOptions{
+		Key: key,
+	}
+	return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+	if opts.Key == "" {
+		return nil, fmt.Errorf("missing key")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultLockSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultLockSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.LockWaitTime == 0 {
+		opts.LockWaitTime = DefaultLockWaitTime
+	}
+	l := &Lock{
+		c:    c,
+		opts: opts,
+	}
+	return l, nil
+}
+
+// Lock attempts to acquire the lock and blocks while doing so.
+// Providing a non-nil stopCh can be used to abort the lock attempt.
+// Returns a channel that is closed if our lock is lost or an error.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the lock being lost.
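+//
+// A hedged usage sketch ("service/leader" is an illustrative key; error
+// handling is elided):
+//
+//	lock, _ := client.LockKey("service/leader")
+//	lostCh, _ := lock.Lock(nil)
+//	if lostCh != nil {
+//		// act as leader until lostCh is closed
+//		<-lostCh
+//		_ = lock.Unlock()
+//	}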
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return nil, ErrLockHeld
+	}
+
+	// Check if we need to create a session first
+	l.lockSession = l.opts.Session
+	if l.lockSession == "" {
+		s, err := l.createSession()
+		if err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		}
+
+		l.sessionRenew = make(chan struct{})
+		l.lockSession = s
+		session := l.c.Session()
+		go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
+
+		// If we fail to acquire the lock, cleanup the session
+		defer func() {
+			if !l.isHeld {
+				close(l.sessionRenew)
+				l.sessionRenew = nil
+			}
+		}()
+	}
+
+	// Setup the query options
+	kv := l.c.KV()
+	qOpts := &QueryOptions{
+		WaitTime: l.opts.LockWaitTime,
+	}
+
+	start := time.Now()
+	attempts := 0
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Handle the one-shot mode.
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > l.opts.LockWaitTime {
+			return nil, nil
+		}
+
+		// Query wait time should not exceed the lock wait time
+		qOpts.WaitTime = l.opts.LockWaitTime - elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+	locked, _, err = kv.Acquire(pair, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, qOpts)
+		if pair != nil && pair.Session != "" {
+			// If the session is not null, this means that a wait can safely happen
+			// using a long poll.
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	_, _, err := kv.Release(lockEnt, nil)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return ErrLockHeld
+	}
+
+	// Look for an existing lock
+	kv := l.c.KV()
+	pair, _, err := kv.Get(l.opts.Key, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read lock: %v", err)
+	}
+
+	// Nothing to do if the lock does not exist
+	if pair == nil {
+		return nil
+	}
+
+	// Check for possible flag conflict
+	if pair.Flags != LockFlagValue {
+		return ErrLockConflict
+	}
+
+	// Check if it is in use
+	if pair.Session != "" {
+		return ErrLockInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(pair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove lock: %v", err)
+	}
+	if !didRemove {
+		return ErrLockInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+	session := l.c.Session()
+	se := l.opts.SessionOpts
+	if se == nil {
+		se = &SessionEntry{
+			Name: l.opts.SessionName,
+			TTL:  l.opts.SessionTTL,
+		}
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     l.opts.Key,
+		Value:   l.opts.Value,
+		Session: session,
+		Flags:   LockFlagValue,
+	}
+}
+
+// monitorLock is a long-running routine to monitor lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := l.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	retries := l.opts.MonitorRetries
+RETRY:
+	pair, meta, err := kv.Get(l.opts.Key, opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
+		if retries > 0 && IsRetryableError(err) {
+			time.Sleep(l.opts.MonitorRetryTime)
+			retries--
+			opts.WaitIndex = 0
+			goto RETRY
+		}
+		return
+	}
+	if pair != nil && pair.Session == session {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go
new file mode 100644
index 0000000..079e224
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator.go
@@ -0,0 +1,11 @@
+package api
+
+// Operator can be used to perform low-level operator tasks for Consul.
+type Operator struct {
+	c *Client
+}
+
+// Operator returns a handle to the operator endpoints.
+func (c *Client) Operator() *Operator {
+	return &Operator{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go
new file mode 100644
index 0000000..5cf7e49
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_area.go
@@ -0,0 +1,194 @@
+package api
+
+// The /v1/operator/area endpoints are available only in Consul Enterprise and
+// interact with its network area subsystem. Network areas are used to link
+// together Consul servers in different Consul datacenters. With network areas,
+// Consul datacenters can be linked together in ways other than a fully-connected
+// mesh, as is required for Consul's WAN.
+
+import (
+	"net"
+	"time"
+)
+
+// Area defines a network area.
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the addresses of Consul servers to join, such as
+	// IPs or hostnames with an optional port number. This is optional.
+	RetryJoin []string
+
+	// UseTLS specifies whether gossip over this area should be encrypted with TLS
+	// if possible.
+	UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member. This will be negative if no RTT estimate
+	// is available.
+	RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("POST", "/v1/operator/area")
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaUpdate will update the configuration of the network area with the given ID.
+func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaGet returns a single network area.
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
+	var out []*Area
+	qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// AreaList returns all the available network areas.
+func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
+	var out []*Area
+	qm, err := op.c.query("/v1/operator/area", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// AreaDelete deletes the given network area.
+func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
+	r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
+
+// AreaJoin attempts to join the given set of join addresses to the given
+// network area. See the Area structure for details about join addresses.
+func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
+	r.setWriteOptions(q)
+	r.obj = addresses
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out []*AreaJoinResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, wm, nil
+}
+
+// AreaMembers lists the Serf information about the members in the given area.
+func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
+	var out []*SerfMember
+	qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
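+
+// A hedged sketch of linking two datacenters (these are Enterprise-only
+// endpoints; the datacenter name and address are illustrative):
+//
+//	id, _, _ := op.AreaCreate(&Area{PeerDatacenter: "dc2", UseTLS: true}, nil)
+//	_, _, _ = op.AreaJoin(id, []string{"10.0.0.10"}, nil)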
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
new file mode 100644
index 0000000..b179406
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
@@ -0,0 +1,219 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
+// Autopilot helps manage operator tasks related to Consul servers like removing
+// failed servers from the Raft quorum.
+type AutopilotConfiguration struct {
+	// CleanupDeadServers controls whether to remove dead servers from the Raft
+	// peer list when a new server joins
+	CleanupDeadServers bool
+
+	// LastContactThreshold is the limit on the amount of time a server can go
+	// without leader contact before being considered unhealthy.
+	LastContactThreshold *ReadableDuration
+
+	// MaxTrailingLogs is the number of entries in the Raft log that a server can
+	// be behind before being considered unhealthy.
+	MaxTrailingLogs uint64
+
+	// ServerStabilizationTime is the minimum amount of time a server must be
+	// in a stable, healthy state before it can be added to the cluster. Only
+	// applicable with Raft protocol version 3 or higher.
+	ServerStabilizationTime *ReadableDuration
+
+	// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
+	// servers into zones for redundancy. If left blank, this feature will be disabled.
+	RedundancyZoneTag string
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration bool
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Consul version will be used.
+	UpgradeVersionTag string
+
+	// CreateIndex holds the index corresponding to the creation of this configuration.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex will be set to the index of the last update when retrieving the
+	// Autopilot configuration. Resubmitting a configuration with
+	// AutopilotCASConfiguration will perform a check-and-set operation which ensures
+	// there hasn't been a subsequent update since the configuration was retrieved.
+	ModifyIndex uint64
+}
+
+// ServerHealth is the health (from the leader's point of view) of a server.
+type ServerHealth struct {
+	// ID is the raft ID of the server.
+	ID string
+
+	// Name is the node name of the server.
+	Name string
+
+	// Address is the address of the server.
+	Address string
+
+	// The status of the SerfHealth check for the server.
+	SerfStatus string
+
+	// Version is the Consul version of the server.
+	Version string
+
+	// Leader is whether this server is currently the leader.
+	Leader bool
+
+	// LastContact is the time since this node's last contact with the leader.
+	LastContact *ReadableDuration
+
+	// LastTerm is the highest leader term this server has a record of in its Raft log.
+	LastTerm uint64
+
+	// LastIndex is the last log index this server has a record of in its Raft log.
+	LastIndex uint64
+
+	// Healthy is whether or not the server is healthy according to the current
+	// Autopilot config.
+	Healthy bool
+
+	// Voter is whether this is a voting server.
+	Voter bool
+
+	// StableSince is the last time this server's Healthy value changed.
+	StableSince time.Time
+}
+
+// OperatorHealthReply is a representation of the overall health of the cluster
+type OperatorHealthReply struct {
+	// Healthy is true if all the servers in the cluster are healthy.
+	Healthy bool
+
+	// FailureTolerance is the number of healthy servers that could be lost without
+	// an outage occurring.
+	FailureTolerance int
+
+	// Servers holds the health of each server.
+	Servers []ServerHealth
+}
+
+// ReadableDuration is a duration type that is serialized to JSON in human readable format.
+type ReadableDuration time.Duration
+
+func NewReadableDuration(dur time.Duration) *ReadableDuration {
+	d := ReadableDuration(dur)
+	return &d
+}
+
+func (d *ReadableDuration) String() string {
+	return d.Duration().String()
+}
+
+func (d *ReadableDuration) Duration() time.Duration {
+	if d == nil {
+		return time.Duration(0)
+	}
+	return time.Duration(*d)
+}
+
+func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
+}
+
+func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
+	if d == nil {
+		return fmt.Errorf("cannot unmarshal to nil pointer")
+	}
+
+	str := string(raw)
+	if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
+		return fmt.Errorf("must be enclosed with quotes: %s", str)
+	}
+	dur, err := time.ParseDuration(str[1 : len(str)-1])
+	if err != nil {
+		return err
+	}
+	*d = ReadableDuration(dur)
+	return nil
+}
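+
+// A hedged round-trip sketch for ReadableDuration's JSON form:
+//
+//	d := NewReadableDuration(90 * time.Second)
+//	b, _ := d.MarshalJSON() // `"1m30s"`
+//	var d2 ReadableDuration
+//	_ = d2.UnmarshalJSON(b) // d2.Duration() == 90*time.Second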
+
+// AutopilotGetConfiguration is used to query the current Autopilot configuration.
+func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out AutopilotConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+
+	return &out, nil
+}
+
+// AutopilotSetConfiguration is used to set the current Autopilot configuration.
+func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
+// Autopilot configuration. The ModifyIndex value will be respected. Returns
+// true on success or false on failure.
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	return res, nil
+}
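+
+// A hedged check-and-set sketch: read the configuration, modify it, and
+// resubmit with the retrieved ModifyIndex so a concurrent update is detected:
+//
+//	conf, _ := op.AutopilotGetConfiguration(nil)
+//	conf.CleanupDeadServers = false
+//	ok, _ := op.AutopilotCASConfiguration(conf, nil)
+//	// ok is false if the configuration changed since it was read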
+
+// AutopilotServerHealth is used to query the health of the servers in the cluster.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out OperatorHealthReply
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 0000000..6b61429
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,86 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// Segment has the network segment this request corresponds to.
+	Segment string
+
+	// A map of the encryption keys to the number of nodes they're installed on
+	Keys map[string]int
+
+	// The total number of nodes in this ring
+	NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+	r := op.c.newRequest("POST", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+	r := op.c.newRequest("GET", "/v1/operator/keyring")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*KeyringResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
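+
+// A hedged key-rotation sketch (the key is an illustrative 16-byte base64
+// value; oldKey is assumed to hold the previously active key):
+//
+//	newKey := "W0eyFwpr2IP27WI5a9dfjw=="
+//	_ = op.KeyringInstall(newKey, nil)
+//	_ = op.KeyringUse(newKey, nil)
+//	_ = op.KeyringRemove(oldKey, nil)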
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 0000000..a9844df
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,89 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Protocol version is the raft protocol version used by the server
+	ProtocolVersion string
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("address", string(address))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("id", string(id))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
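+
+// A hedged stale-peer cleanup sketch: list the Raft configuration and remove
+// any server no longer known to the catalog (reported as "(unknown)"):
+//
+//	cfg, _ := op.RaftGetConfiguration(nil)
+//	for _, s := range cfg.Servers {
+//		if s.Node == "(unknown)" {
+//			_ = op.RaftRemovePeerByAddress(s.Address, nil)
+//		}
+//	}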
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 0000000..92b05d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,11 @@
+package api
+
+// SegmentList returns all the available LAN segments.
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
+	var out []string
+	qm, err := op.c.query("/v1/operator/segment", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 0000000..8bb1004
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,212 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+	// NearestN is set to the number of remote datacenters to try, based on
+	// network coordinates.
+	NearestN int
+
+	// Datacenters is a fixed list of datacenters to try after NearestN. We
+	// never try a datacenter multiple times, so those are subtracted from
+	// this list before proceeding.
+	Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+	// TTL is the time to live for the served DNS results.
+	TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+	// Service is the service to query.
+	Service string
+
+	// Near allows baking in the name of a node to automatically distance-
+	// sort from. The magic "_agent" value is supported, which sorts near
+	// the agent that initiated the request by default.
+	Near string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryDatacenterOptions
+
+	// IgnoreCheckIDs is an optional list of health check IDs to ignore when
+	// considering which nodes are healthy. It is useful as an emergency measure
+	// to temporarily override some health check that is producing false negatives
+	// for example.
+	IgnoreCheckIDs []string
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded)
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+
+	// NodeMeta is a map of required node metadata fields. If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	NodeMeta map[string]string
+
+	// Connect if true will filter the prepared query results to only
+	// include Connect-capable services. These include both native services
+	// and proxies for matching services. Note that if a proxy matches,
+	// the constraints in the query above (Near, OnlyPassing, etc.) apply
+	// to the _proxy_ and not the service being proxied. In practice, proxies
+	// should be directly next to their services so this isn't an issue.
+	Connect bool
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+	// Type specifies the type of the query template. Currently only
+	// "name_prefix_match" is supported. This field is required.
+	Type string
+
+	// Regexp allows specifying a regex pattern to match against the name
+	// of the query being executed.
+	Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+	Session string
+
+	// Token is the ACL token used when the query was created, and it is
+	// used when a query is subsequently executed. This token, or a token
+	// with management privileges, must be used to change the query later.
+	Token string
+
+	// Service defines a service query (leaving things open for other types
+	// later).
+	Service ServiceQuery
+
+	// DNS has options that control how the results of this query are
+	// served over DNS.
+	DNS QueryDNSOptions
+
+	// Template is used to pass through the arguments for creating a
+	// prepared query with an attached template. If a template is given,
+	// interpolations are possible in other struct fields.
+	Template QueryTemplate
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+	// Service is the service that was queried.
+	Service string
+
+	// Nodes has the nodes that were output by the query.
+	Nodes []ServiceEntry
+
+	// DNS has the options for serving these results over DNS.
+	DNS QueryDNSOptions
+
+	// Datacenter is the datacenter that these results came from.
+	Datacenter string
+
+	// Failovers is a count of how many times we had to query a remote
+	// datacenter.
+	Failovers int
+}
+
+// PreparedQuery can be used to query the prepared query endpoints.
+type PreparedQuery struct {
+	c *Client
+}
+
+// PreparedQuery returns a handle to the prepared query endpoints.
+func (c *Client) PreparedQuery() *PreparedQuery {
+	return &PreparedQuery{c}
+}
+
+// Create makes a new prepared query. The ID of the new query is returned.
+func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
+	r := c.c.newRequest("POST", "/v1/query")
+	r.setWriteOptions(q)
+	r.obj = query
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update makes updates to an existing prepared query.
+func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
+	return c.c.write("/v1/query/"+query.ID, query, nil, q)
+}
+
+// List is used to fetch all the prepared queries (always requires a management
+// token).
+func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+	var out []*PreparedQueryDefinition
+	qm, err := c.c.query("/v1/query", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Get is used to fetch a specific prepared query.
+func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+	var out []*PreparedQueryDefinition
+	qm, err := c.c.query("/v1/query/"+queryID, &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Delete is used to delete a specific prepared query.
+func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
+
+// Execute is used to execute a specific prepared query. You can execute using
+// a query ID or name.
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
+	var out *PreparedQueryExecuteResponse
+	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
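+
+// A hedged sketch of defining and executing a prepared query ("web" is an
+// illustrative service name; error handling is elided):
+//
+//	pq := client.PreparedQuery()
+//	def := &PreparedQueryDefinition{
+//		Name:    "web",
+//		Service: ServiceQuery{Service: "web", OnlyPassing: true},
+//	}
+//	id, _, _ := pq.Create(def, nil)
+//	res, _, _ := pq.Execute(id, nil)
+//	// res.Nodes holds the healthy service entries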
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 0000000..745a208
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+	c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+	return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
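+
+// A hedged sketch of querying a custom endpoint ("/v1/agent/self" is a
+// standard Consul endpoint, used here for illustration):
+//
+//	var out map[string]interface{}
+//	_, _ = client.Raw().Query("/v1/agent/self", &out, nil)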
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 0000000..bc4f885
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,514 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix for
+	// coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps each session ID to true; it is effectively used as a set.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, interruption via the stopCh, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
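+//
+// A hedged usage sketch (the prefix and limit are illustrative; error
+// handling is elided):
+//
+//	sema, _ := client.SemaphorePrefix("service/workers/sema", 3)
+//	slotCh, _ := sema.Acquire(nil)
+//	if slotCh != nil {
+//		// do limited-concurrency work until slotCh is closed
+//		_ = sema.Release()
+//	}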
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return nil, ErrSemaphoreHeld
+	}
+
+	// Check if we need to create a session first
+	s.lockSession = s.opts.Session
+	if s.lockSession == "" {
+		sess, err := s.createSession()
+		if err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		}
+
+		s.sessionRenew = make(chan struct{})
+		s.lockSession = sess
+		session := s.c.Session()
+		go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
+
+		// If we fail to acquire the lock, cleanup the session
+		defer func() {
+			if !s.isHeld {
+				close(s.sessionRenew)
+				s.sessionRenew = nil
+			}
+		}()
+	}
+
+	// Create the contender entry
+	kv := s.c.KV()
+	made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
+	if err != nil || !made {
+		return nil, fmt.Errorf("failed to make contender entry: %v", err)
+	}
+
+	// Setup the query options
+	qOpts := &QueryOptions{
+		WaitTime: s.opts.SemaphoreWaitTime,
+	}
+
+	start := time.Now()
+	attempts := 0
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Handle the one-shot mode.
+	if s.opts.SemaphoreTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > s.opts.SemaphoreWaitTime {
+			return nil, nil
+		}
+
+		// Query wait time should not exceed the semaphore wait time
+		qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed
+	}
+	attempts++
+
+	// Read the prefix
+	pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Decode the lock
+	lockPair := s.findLock(pairs)
+	if lockPair.Flags != SemaphoreFlagValue {
+		return nil, ErrSemaphoreConflict
+	}
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify we agree with the limit
+	if lock.Limit != s.opts.Limit {
+		return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
+			lock.Limit, s.opts.Limit)
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if the lock is held
+	if len(lock.Holders) >= lock.Limit {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Create a new lock with us as a holder
+	lock.Holders[s.lockSession] = true
+	newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attempt the acquisition
+	didSet, _, err := kv.CAS(newLock, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to update lock: %v", err)
+	}
+	if !didSet {
+		// Update failed, could have been a race with another contender,
+		// retry the operation
+		goto WAIT
+	}
+
+	// Watch to ensure we maintain ownership of the slot
+	lockCh := make(chan struct{})
+	go s.monitorLock(s.lockSession, lockCh)
+
+	// Set that we own the lock
+	s.isHeld = true
+
+	// Acquired! All done
+	return lockCh, nil
+}
+
+// Release is used to voluntarily give up our semaphore slot. It is
+// an error to call this if the semaphore has not been acquired.
+func (s *Semaphore) Release() error {
+	// Hold the lock as we try to release
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !s.isHeld {
+		return ErrSemaphoreNotHeld
+	}
+
+	// Set that we no longer own the lock
+	s.isHeld = false
+
+	// Stop the session renew
+	if s.sessionRenew != nil {
+		defer func() {
+			close(s.sessionRenew)
+			s.sessionRenew = nil
+		}()
+	}
+
+	// Get and clear the lock session
+	lockSession := s.lockSession
+	s.lockSession = ""
+
+	// Remove ourselves as a lock holder
+	kv := s.c.KV()
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+READ:
+	pair, _, err := kv.Get(key, nil)
+	if err != nil {
+		return err
+	}
+	if pair == nil {
+		pair = &KVPair{}
+	}
+	lock, err := s.decodeLock(pair)
+	if err != nil {
+		return err
+	}
+
+	// Create a new lock without us as a holder
+	if _, ok := lock.Holders[lockSession]; ok {
+		delete(lock.Holders, lockSession)
+		newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+		if err != nil {
+			return err
+		}
+
+		// Swap the locks
+		didSet, _, err := kv.CAS(newLock, nil)
+		if err != nil {
+			return fmt.Errorf("failed to update lock: %v", err)
+		}
+		if !didSet {
+			goto READ
+		}
+	}
+
+	// Destroy the contender entry
+	contenderKey := path.Join(s.opts.Prefix, lockSession)
+	if _, err := kv.Delete(contenderKey, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Destroy is used to clean up the semaphore entry. It is not necessary
+// to invoke it. It will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+	// Hold the lock as we try to acquire
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return ErrSemaphoreHeld
+	}
+
+	// List for the semaphore
+	kv := s.c.KV()
+	pairs, _, err := kv.List(s.opts.Prefix, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Find the lock pair, bail if it doesn't exist
+	lockPair := s.findLock(pairs)
+	if lockPair.ModifyIndex == 0 {
+		return nil
+	}
+	if lockPair.Flags != SemaphoreFlagValue {
+		return ErrSemaphoreConflict
+	}
+
+	// Decode the lock
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return err
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if there are any holders
+	if len(lock.Holders) > 0 {
+		return ErrSemaphoreInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(lockPair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove semaphore: %v", err)
+	}
+	if !didRemove {
+		return ErrSemaphoreInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+	session := s.c.Session()
+	se := &SessionEntry{
+		Name:     s.opts.SessionName,
+		TTL:      s.opts.SessionTTL,
+		Behavior: SessionBehaviorDelete,
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     path.Join(s.opts.Prefix, session),
+		Value:   s.opts.Value,
+		Session: session,
+		Flags:   SemaphoreFlagValue,
+	}
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+	for _, pair := range pairs {
+		if pair.Key == key {
+			return pair
+		}
+	}
+	return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul
+func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
+	// Handle if there is no lock
+	if pair == nil || pair.Value == nil {
+		return &semaphoreLock{
+			Limit:   s.opts.Limit,
+			Holders: make(map[string]bool),
+		}, nil
+	}
+
+	l := &semaphoreLock{}
+	if err := json.Unmarshal(pair.Value, l); err != nil {
+		return nil, fmt.Errorf("lock decoding failed: %v", err)
+	}
+	return l, nil
+}
+
+// encodeLock is used to encode a semaphoreLock into a KVPair
+// that can be PUT
+func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
+	enc, err := json.Marshal(l)
+	if err != nil {
+		return nil, fmt.Errorf("lock encoding failed: %v", err)
+	}
+	pair := &KVPair{
+		Key:         path.Join(s.opts.Prefix, DefaultSemaphoreKey),
+		Value:       enc,
+		Flags:       SemaphoreFlagValue,
+		ModifyIndex: oldIndex,
+	}
+	return pair, nil
+}
+
+// pruneDeadHolders is used to remove all the dead lock holders
+func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
+	// Gather all the live holders
+	alive := make(map[string]struct{}, len(pairs))
+	for _, pair := range pairs {
+		if pair.Session != "" {
+			alive[pair.Session] = struct{}{}
+		}
+	}
+
+	// Remove any holders that are dead
+	for holder := range lock.Holders {
+		if _, ok := alive[holder]; !ok {
+			delete(lock.Holders, holder)
+		}
+	}
+}
+
+// monitorLock is a long-running routine to monitor semaphore ownership.
+// It closes the stopCh if we lose our slot.
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := s.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	retries := s.opts.MonitorRetries
+RETRY:
+	pairs, meta, err := kv.List(s.opts.Prefix, opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
+		if retries > 0 && IsRetryableError(err) {
+			time.Sleep(s.opts.MonitorRetryTime)
+			retries--
+			opts.WaitIndex = 0
+			goto RETRY
+		}
+		return
+	}
+	lockPair := s.findLock(pairs)
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return
+	}
+	s.pruneDeadHolders(lock, pairs)
+	if _, ok := lock.Holders[session]; ok {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go
new file mode 100644
index 0000000..1613f11
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/session.go
@@ -0,0 +1,224 @@
+package api
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// SessionBehaviorRelease is the default behavior and causes
+	// all associated locks to be released on session invalidation.
+	SessionBehaviorRelease = "release"
+
+	// SessionBehaviorDelete is new in Consul 0.5 and changes the
+	// behavior to delete all associated locks on session invalidation.
+	// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+	SessionBehaviorDelete = "delete"
+)
+
+var ErrSessionExpired = errors.New("session expired")
+
+// SessionEntry represents a session in consul
+type SessionEntry struct {
+	CreateIndex uint64
+	ID          string
+	Name        string
+	Node        string
+	Checks      []string
+	LockDelay   time.Duration
+	Behavior    string
+	TTL         string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+	c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+	return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	body := make(map[string]interface{})
+	body["Checks"] = []string{}
+	if se != nil {
+		if se.Name != "" {
+			body["Name"] = se.Name
+		}
+		if se.Node != "" {
+			body["Node"] = se.Node
+		}
+		if se.LockDelay != 0 {
+			body["LockDelay"] = durToMsec(se.LockDelay)
+		}
+		if se.Behavior != "" {
+			body["Behavior"] = se.Behavior
+		}
+		if se.TTL != "" {
+			body["TTL"] = se.TTL
+		}
+	}
+	return s.create(body, q)
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	var obj interface{}
+	if se != nil {
+		body := make(map[string]interface{})
+		obj = body
+		if se.Name != "" {
+			body["Name"] = se.Name
+		}
+		if se.Node != "" {
+			body["Node"] = se.Node
+		}
+		if se.LockDelay != 0 {
+			body["LockDelay"] = durToMsec(se.LockDelay)
+		}
+		if len(se.Checks) > 0 {
+			body["Checks"] = se.Checks
+		}
+		if se.Behavior != "" {
+			body["Behavior"] = se.Behavior
+		}
+		if se.TTL != "" {
+			body["TTL"] = se.TTL
+		}
+	}
+	return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+	var out struct{ ID string }
+	wm, err := s.c.write("/v1/session/create", obj, &out, q)
+	if err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Destroy invalidates a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
+	if err != nil {
+		return nil, err
+	}
+	return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+	r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := s.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+
+	if resp.StatusCode == 404 {
+		return nil, wm, nil
+	} else if resp.StatusCode != 200 {
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+
+	var entries []*SessionEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	if len(entries) > 0 {
+		return entries[0], wm, nil
+	}
+	return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a long running
+// goroutine to ensure a session stays valid.
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
+	ctx := q.Context()
+
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+
+	waitDur := ttl / 2
+	lastRenewTime := time.Now()
+	var lastErr error
+	for {
+		if time.Since(lastRenewTime) > ttl {
+			return lastErr
+		}
+		select {
+		case <-time.After(waitDur):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				waitDur = time.Second
+				lastErr = err
+				continue
+			}
+			if entry == nil {
+				return ErrSessionExpired
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+			waitDur = ttl / 2
+			lastRenewTime = time.Now()
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+
+		case <-ctx.Done():
+			// Bail immediately since attempting the destroy would
+			// use the canceled context in q and fail anyway.
+			return ctx.Err()
+		}
+	}
+}
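+
+// A minimal usage sketch (illustrative comment only; assumes an existing
+// *Client named client and elides most error handling):
+//
+//	session := client.Session()
+//	id, _, err := session.Create(&SessionEntry{TTL: "15s"}, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	doneCh := make(chan struct{})
+//	go session.RenewPeriodic("15s", id, nil, doneCh)
+//	// ... do work while the session stays valid ...
+//	close(doneCh) // stops renewing and destroys the session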
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/list", &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 0000000..e902377
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,47 @@
+package api
+
+import (
+	"io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+	c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+	return &Snapshot{c}
+}
+
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/snapshot")
+	r.setQueryOptions(q)
+
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+	return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+	r := s.c.newRequest("PUT", "/v1/snapshot")
+	r.body = in
+	r.setWriteOptions(q)
+	_, _, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	return nil
+}
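+
+// A minimal usage sketch (illustrative comment only; assumes an existing
+// *Client named client and elides most error handling):
+//
+//	snap := client.Snapshot()
+//	rc, _, err := snap.Save(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer rc.Close()
+//	f, _ := os.Create("consul.snap")
+//	io.Copy(f, rc)
+//	f.Close()
+//
+//	// Later, restore the snapshot from the saved file:
+//	in, _ := os.Open("consul.snap")
+//	defer in.Close()
+//	err = snap.Restore(nil, in)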
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 0000000..74ef61a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+// Peers is used to query for the known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
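+
+// A minimal usage sketch (illustrative comment only; assumes an existing
+// *Client named client):
+//
+//	status := client.Status()
+//	leader, err := status.Leader() // e.g. "127.0.0.1:8300"
+//	peers, err := status.Peers()   // addresses of the known raft peers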
diff --git a/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss b/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss
new file mode 100644
index 0000000..3d0a22d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/ui-v2/app/styles/components/notice.scss
@@ -0,0 +1,10 @@
+@import './notice/index';
+.notice.warning {
+  @extend %notice-warning;
+}
+.notice.info {
+  @extend %notice-info;
+}
+.notice.policy-management {
+  @extend %notice-highlight;
+}
diff --git a/vendor/github.com/hashicorp/consul/website/LICENSE.md b/vendor/github.com/hashicorp/consul/website/LICENSE.md
new file mode 100644
index 0000000..3189f43
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/website/LICENSE.md
@@ -0,0 +1,10 @@
+# Proprietary License
+
+This license is temporary while a more official one is drafted. However,
+this should make it clear:
+
+The text contents of this website are MPL 2.0 licensed.
+
+The design contents of this website are proprietary and may not be reproduced
+or reused in any way other than to run the website locally. The license for
+the design is owned solely by HashiCorp, Inc.
diff --git a/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md b/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md
new file mode 100644
index 0000000..dc2c2c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/website/source/api/operator/license.html.md
@@ -0,0 +1,145 @@
+---
+layout: api
+page_title: License - Operator - HTTP API
+sidebar_current: api-operator-license
+description: |-
+  The /operator/license endpoints allow for setting and retrieving the Consul
+  Enterprise License.
+---
+
+# License - Operator HTTP API
+
+~> **Enterprise Only!** This API endpoint and functionality exist only in
+Consul Enterprise; they are not present in the open source version of Consul.
+
+The licensing functionality described here is available only in
+[Consul Enterprise](https://www.hashicorp.com/products/consul/) version 1.1.0 and later.
+
+## Getting the Consul License
+
+This endpoint gets information about the current license.
+
+| Method | Path                         | Produces                   |
+| ------ | ---------------------------- | -------------------------- |
+| `GET`  | `/operator/license`          | `application/json`         |
+
+The table below shows this endpoint's support for
+[blocking queries](/api/index.html#blocking-queries),
+[consistency modes](/api/index.html#consistency-modes),
+[agent caching](/api/index.html#agent-caching), and
+[required ACLs](/api/index.html#acls).
+
+| Blocking Queries | Consistency Modes | Agent Caching | ACL Required     |
+| ---------------- | ----------------- | ------------- | ---------------- |
+| `NO`             | `all`             | `none`        | `none`           |
+
+### Parameters
+
+- `dc` `(string: "")` - Specifies the datacenter whose license should be retrieved.
+  This will default to the datacenter of the agent serving the HTTP request.
+  This is specified as a URL query parameter.
+
+### Sample Request
+
+```text
+$ curl \
+    http://127.0.0.1:8500/v1/operator/license
+```
+
+### Sample Response
+
+```json
+{
+    "Valid": true,
+    "License": {
+        "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c",
+        "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b",
+        "installation_id": "*",
+        "issue_time": "2018-05-21T20:03:35.911567355Z",
+        "start_time": "2018-05-21T04:00:00Z",
+        "expiration_time": "2019-05-22T03:59:59.999Z",
+        "product": "consul",
+        "flags": {
+            "package": "premium"
+        },
+        "features": [
+            "Automated Backups",
+            "Automated Upgrades",
+            "Enhanced Read Scalability",
+            "Network Segments",
+            "Redundancy Zone",
+            "Advanced Network Federation"
+        ],
+        "temporary": false
+    },
+    "Warnings": []
+}
+```
+
+## Updating the Consul License
+
+This endpoint updates the Consul license and returns some of the
+license contents as well as any warning messages regarding its validity.
+
+| Method | Path                         | Produces                   |
+| ------ | ---------------------------- | -------------------------- |
+| `PUT`  | `/operator/license`          | `application/json`         |
+
+The table below shows this endpoint's support for
+[blocking queries](/api/index.html#blocking-queries),
+[consistency modes](/api/index.html#consistency-modes),
+[agent caching](/api/index.html#agent-caching), and
+[required ACLs](/api/index.html#acls).
+
+| Blocking Queries | Consistency Modes | Agent Caching | ACL Required     |
+| ---------------- | ----------------- | ------------- | ---------------- |
+| `NO`             | `none`            | `none`        | `operator:write` |
+
+### Parameters
+
+- `dc` `(string: "")` - Specifies the datacenter whose license should be updated.
+  This will default to the datacenter of the agent serving the HTTP request.
+  This is specified as a URL query parameter.
+
+### Sample Payload
+
+The payload is the raw license blob.
+
+### Sample Request
+
+```text
+$ curl \
+    --request PUT \
+    --data @consul.license \
+    http://127.0.0.1:8500/v1/operator/license
+```
+
+### Sample Response
+
+```json
+{
+    "Valid": true,
+    "License": {
+        "license_id": "2afbf681-0d1a-0649-cb6c-333ec9f0989c",
+        "customer_id": "0259271d-8ffc-e85e-0830-c0822c1f5f2b",
+        "installation_id": "*",
+        "issue_time": "2018-05-21T20:03:35.911567355Z",
+        "start_time": "2018-05-21T04:00:00Z",
+        "expiration_time": "2019-05-22T03:59:59.999Z",
+        "product": "consul",
+        "flags": {
+            "package": "premium"
+        },
+        "features": [
+            "Automated Backups",
+            "Automated Upgrades",
+            "Enhanced Read Scalability",
+            "Network Segments",
+            "Redundancy Zone",
+            "Advanced Network Federation"
+        ],
+        "temporary": false
+    },
+    "Warnings": []
+}
+```
diff --git a/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb b/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb
new file mode 100644
index 0000000..b65d171
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/website/source/docs/commands/license.html.markdown.erb
@@ -0,0 +1,109 @@
+---
+layout: "docs"
+page_title: "Commands: License"
+sidebar_current: "docs-commands-license"
+description: >
+  The license command provides datacenter-level management of the Consul Enterprise license.
+---
+
+# Consul License
+
+Command: `consul license`
+
+<%= enterprise_alert :consul %>
+
+The `license` command provides datacenter-level management of the Consul Enterprise license. This was added in Consul 1.1.0.
+
+If ACLs are enabled then a token with operator privileges may be required in
+order to use this command. Requests are forwarded internally to the leader
+if required, so this can be run from any Consul node in a cluster. See the
+[ACL Guide](/docs/guides/acl.html#operator) for more information.
+
+
+```text
+Usage: consul license <subcommand> [options] [args]
+
+  This command has subcommands for managing the Consul Enterprise license.
+  Here are some simple examples, and more detailed examples are
+  available in the subcommands or the documentation.
+
+  Install a new license from a file:
+
+      $ consul license put @consul.license
+
+  Install a new license from stdin:
+
+      $ consul license put -
+
+  Install a new license from a string:
+
+      $ consul license put "<license blob>"
+
+  Retrieve the current license:
+
+      $ consul license get
+
+  For more examples, ask for subcommand help or view the documentation.
+
+Subcommands:
+    get    Get the current license
+    put    Puts a new license in the datacenter
+```
+
+## put
+
+This command sets the Consul Enterprise license.
+
+Usage: `consul license put [options] LICENSE`
+
+#### API Options
+
+<%= partial "docs/commands/http_api_options_client" %>
+<%= partial "docs/commands/http_api_options_server" %>
+
+The output looks like this:
+
+```
+License is valid
+License ID: 2afbf681-0d1a-0649-cb6c-333ec9f0989c
+Customer ID: 0259271d-8ffc-e85e-0830-c0822c1f5f2b
+Expires At: 2019-05-22 03:59:59.999 +0000 UTC
+Datacenter: *
+Package: premium
+Licensed Features:
+        Automated Backups
+        Automated Upgrades
+        Enhanced Read Scalability
+        Network Segments
+        Redundancy Zone
+        Advanced Network Federation
+```
+
+## get
+
+This command gets the Consul Enterprise license.
+
+Usage: `consul license get [options]`
+
+#### API Options
+
+<%= partial "docs/commands/http_api_options_client" %>
+<%= partial "docs/commands/http_api_options_server" %>
+
+The output looks like this:
+
+```
+License is valid
+License ID: 2afbf681-0d1a-0649-cb6c-333ec9f0989c
+Customer ID: 0259271d-8ffc-e85e-0830-c0822c1f5f2b
+Expires At: 2019-05-22 03:59:59.999 +0000 UTC
+Datacenter: *
+Package: premium
+Licensed Features:
+        Automated Backups
+        Automated Upgrades
+        Enhanced Read Scalability
+        Network Segments
+        Redundancy Zone
+        Advanced Network Federation
+```
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of  any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md
new file mode 100644
index 0000000..036e531
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md
@@ -0,0 +1,30 @@
+# cleanhttp
+
+Functions for accessing "clean" Go http.Client values
+
+-------------
+
+The Go standard library contains a default `http.Client` called
+`http.DefaultClient`. It is a common idiom in Go code to start with
+`http.DefaultClient` and tweak it as necessary, and in fact, this is
+encouraged; from the `http` package documentation:
+
+> The Client's Transport typically has internal state (cached TCP connections),
+> so Clients should be reused instead of created as needed. Clients are safe for
+> concurrent use by multiple goroutines.
+
+Unfortunately, this is a shared value, and it is not uncommon for libraries to
+assume that they are free to modify it at will. With enough dependencies, it
+can be very easy to encounter strange problems and race conditions due to
+manipulation of this shared value across libraries and goroutines (clients are
+safe for concurrent use, but writing values to the client struct itself is not
+protected).
+
+Making things worse is the fact that a bare `http.Client` will use a default
+`http.Transport` called `http.DefaultTransport`, which is another global value
+that behaves the same way. So it is not simply enough to replace
+`http.DefaultClient` with `&http.Client{}`.
+
+This repository provides some simple functions to get a "clean" `http.Client`
+-- one that uses the same default values as the Go standard library, but
+returns a client that does not share any state with other clients.
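+
+## Example Usage
+
+Here is a minimal sketch of how these functions are meant to be used; the
+URL is a placeholder and error handling is abbreviated:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+)
+
+func main() {
+	// A fresh client with keepalives disabled, suitable for one-off requests.
+	c := cleanhttp.DefaultClient()
+	resp, err := c.Get("https://example.com/")
+	if err != nil {
+		fmt.Println("request failed:", err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println("status:", resp.Status)
+
+	// For repeated requests to the same host(s), prefer the pooled client
+	// so idle connections can be reused.
+	pooled := cleanhttp.DefaultPooledClient()
+	if resp2, err := pooled.Get("https://example.com/"); err == nil {
+		resp2.Body.Close()
+	}
+}
+```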
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
new file mode 100644
index 0000000..8d306bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -0,0 +1,57 @@
+package cleanhttp
+
+import (
+	"net"
+	"net/http"
+	"runtime"
+	"time"
+)
+
+// DefaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport, but with idle connections and keepalives disabled.
+func DefaultTransport() *http.Transport {
+	transport := DefaultPooledTransport()
+	transport.DisableKeepAlives = true
+	transport.MaxIdleConnsPerHost = -1
+	return transport
+}
+
+// DefaultPooledTransport returns a new http.Transport with similar default
+// values to http.DefaultTransport. Do not use this for transient transports as
+// it can leak file descriptors over time. Only use this for transports that
+// will be re-used for the same host(s).
+func DefaultPooledTransport() *http.Transport {
+	transport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
+	}
+	return transport
+}
+
+// DefaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func DefaultClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultTransport(),
+	}
+}
+
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared Transport. Do not use this function for
+// transient clients as it can leak file descriptors over time. Only use this
+// for clients that will be re-used for the same host(s).
+func DefaultPooledClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultPooledTransport(),
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 0000000..0584109
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 0000000..7eda377
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,43 @@
+package cleanhttp
+
+import (
+	"net/http"
+	"strings"
+	"unicode"
+)
+
+// HandlerInput provides input options to cleanhttp's handlers
+type HandlerInput struct {
+	ErrStatus int
+}
+
+// PrintablePathCheckHandler is a middleware that ensures the request path
+// contains only printable runes.
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
+	// Nil-check on input to make it optional
+	if input == nil {
+		input = &HandlerInput{
+			ErrStatus: http.StatusBadRequest,
+		}
+	}
+
+	// Default to http.StatusBadRequest on error
+	if input.ErrStatus == 0 {
+		input.ErrStatus = http.StatusBadRequest
+	}
+
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Check URL path for non-printable characters
+		idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
+			return !unicode.IsPrint(c)
+		})
+
+		if idx != -1 {
+			w.WriteHeader(input.ErrStatus)
+			return
+		}
+
+		next.ServeHTTP(w, r)
+	})
+}
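+
+// A usage sketch (illustrative comment only): wrap an existing handler so
+// requests whose URL paths contain non-printable runes are rejected with
+// the configured status code.
+//
+//	mux := http.NewServeMux()
+//	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+//		w.Write([]byte("ok"))
+//	})
+//	handler := PrintablePathCheckHandler(mux, nil)
+//	log.Fatal(http.ListenAndServe(":8080", handler))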
diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
new file mode 100644
index 0000000..80e1de4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.6
+
+branches:
+  only:
+    - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of  any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
new file mode 100644
index 0000000..c3989e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile
@@ -0,0 +1,8 @@
+TEST?=./...
+
+test:
+	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
+	go vet $(TEST)
+	go test $(TEST) -race
+
+.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
new file mode 100644
index 0000000..f5abffc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/README.md
@@ -0,0 +1,43 @@
+# rootcerts
+
+Functions for loading root certificates for TLS connections.
+
+-----
+
+Go's standard library `crypto/tls` provides a common mechanism for configuring
+TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
+of certificates for the client to use as a trust store when verifying server
+certificates.
+
+This library contains utility functions for loading certificates destined for
+that field, as well as one other important thing:
+
+When the `RootCAs` field is `nil`, the standard library attempts to load the
+host's root CA set.  This behavior is OS-specific, and the Darwin
+implementation contains [a bug that prevents trusted certificates from the
+System and Login keychains from being loaded][1]. This library contains
+Darwin-specific behavior that works around that bug.
+
+[1]: https://github.com/golang/go/issues/14514
+
+## Example Usage
+
+Here's a snippet demonstrating how this library is meant to be used:
+
+```go
+func httpClient() (*http.Client, error) {
+	tlsConfig := &tls.Config{}
+	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+		CAFile: os.Getenv("MYAPP_CAFILE"),
+		CAPath: os.Getenv("MYAPP_CAPATH"),
+	})
+	if err != nil {
+		return nil, err
+	}
+	c := cleanhttp.DefaultClient()
+	t := cleanhttp.DefaultTransport()
+	t.TLSClientConfig = tlsConfig
+	c.Transport = t
+	return c, nil
+}
+```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 0000000..b55cc62
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 0000000..aeb30ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,103 @@
+package rootcerts
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When both
+// CAFile and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+	// precedence over CAPath.
+	CAFile string
+
+	// CAPath is a path to a directory populated with PEM-encoded certificates.
+	CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+	if t == nil {
+		return nil
+	}
+	pool, err := LoadCACerts(c)
+	if err != nil {
+		return err
+	}
+	t.RootCAs = pool
+	return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+	if c == nil {
+		c = &Config{}
+	}
+	if c.CAFile != "" {
+		return LoadCAFile(c.CAFile)
+	}
+	if c.CAPath != "" {
+		return LoadCAPath(c.CAPath)
+	}
+
+	return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading CA File: %s", err)
+	}
+
+	ok := pool.AppendCertsFromPEM(pem)
+	if !ok {
+		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+	}
+
+	return pool, nil
+}
+
+// LoadCAPath walks the provided path and loads all certificates encountered into
+// a pool.
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		pem, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("Error loading file from CAPath: %s", err)
+		}
+
+		ok := pool.AppendCertsFromPEM(pem)
+		if !ok {
+			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+		}
+
+		return nil
+	}
+
+	err := filepath.Walk(caPath, walkFn)
+	if err != nil {
+		return nil, err
+	}
+
+	return pool, nil
+}
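Taken together, `LoadCACerts` resolves the trust store in a fixed order: `CAFile` first, then `CAPath`, then `LoadSystemCAs`. A minimal sketch of calling it directly (the CA bundle path is a hypothetical placeholder):

```go
package main

import (
	"crypto/tls"
	"log"

	rootcerts "github.com/hashicorp/go-rootcerts"
)

func main() {
	// CAFile takes precedence over CAPath; with both blank this would fall
	// through to LoadSystemCAs.
	pool, err := rootcerts.LoadCACerts(&rootcerts.Config{
		CAFile: "/etc/myapp/ca-bundle.pem", // hypothetical path
	})
	if err != nil {
		log.Fatal(err)
	}

	// The pool plugs straight into a tls.Config as the client trust store.
	cfg := &tls.Config{RootCAs: pool}
	_ = cfg
}
```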
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 0000000..66b1472
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 0000000..a9a0406
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+	"crypto/x509"
+	"os/exec"
+	"path"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs has special behavior on Darwin systems to work around the
+// crypto/x509 issue that prevents certificates from being loaded from the
+// System or Login keychains.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	for _, keychain := range certKeychains() {
+		err := addCertsFromKeychain(pool, keychain)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+	data, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+
+	pool.AppendCertsFromPEM(data)
+
+	return nil
+}
+
+func certKeychains() []string {
+	keychains := []string{
+		"/System/Library/Keychains/SystemRootCertificates.keychain",
+		"/Library/Keychains/System.keychain",
+	}
+	home, err := homedir.Dir()
+	if err == nil {
+		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+		keychains = append(keychains, loginKeychain)
+	}
+	return keychains
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
new file mode 120000
index 0000000..dda0574
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
@@ -0,0 +1 @@
+../capath/securetrust.pem
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
new file mode 120000
index 0000000..37ed4f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
@@ -0,0 +1 @@
+../capath/thawte.pem
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go
new file mode 100644
index 0000000..3582ee4
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/client.go
@@ -0,0 +1,243 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+// Client manages the estimated network coordinate for a given node, and adjusts
+// it as the node observes round trip times and estimated coordinates from other
+// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
+// for more details.
+type Client struct {
+	// coord is the current estimate of the client's network coordinate.
+	coord *Coordinate
+
+	// origin is a coordinate sitting at the origin.
+	origin *Coordinate
+
+	// config contains the tuning parameters that govern the performance of
+	// the algorithm.
+	config *Config
+
+	// adjustmentIndex is the current index into the adjustmentSamples slice.
+	adjustmentIndex uint
+
+	// adjustment is used to store samples for the adjustment calculation.
+	adjustmentSamples []float64
+
+	// latencyFilterSamples is used to store the last several RTT samples,
+	// keyed by node name. We will use the config's LatencyFilterSamples
+	// value to determine how many samples we keep, per node.
+	latencyFilterSamples map[string][]float64
+
+	// stats is used to record events that occur when updating coordinates.
+	stats ClientStats
+
+	// mutex enables safe concurrent access to the client.
+	mutex sync.RWMutex
+}
+
+// ClientStats is used to record events that occur when updating coordinates.
+type ClientStats struct {
+	// Resets is incremented any time we reset our local coordinate because
+	// our calculations have resulted in an invalid state.
+	Resets int
+}
+
+// NewClient creates a new Client and verifies the configuration is valid.
+func NewClient(config *Config) (*Client, error) {
+	if !(config.Dimensionality > 0) {
+		return nil, fmt.Errorf("dimensionality must be >0")
+	}
+
+	return &Client{
+		coord:                NewCoordinate(config),
+		origin:               NewCoordinate(config),
+		config:               config,
+		adjustmentIndex:      0,
+		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
+		latencyFilterSamples: make(map[string][]float64),
+	}, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if err := c.checkCoordinate(coord); err != nil {
+		return err
+	}
+
+	c.coord = coord.Clone()
+	return nil
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	delete(c.latencyFilterSamples, node)
+}
+
+// Stats returns a copy of stats for the client.
+func (c *Client) Stats() ClientStats {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	return c.stats
+}
+
+// checkCoordinate returns an error if the coordinate isn't compatible with
+// this client, or if the coordinate itself isn't valid. This assumes the mutex
+// has been locked already.
+func (c *Client) checkCoordinate(coord *Coordinate) error {
+	if !c.coord.IsCompatibleWith(coord) {
+		return fmt.Errorf("dimensions aren't compatible")
+	}
+
+	if !coord.IsValid() {
+		return fmt.Errorf("coordinate is invalid")
+	}
+
+	return nil
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+	const zeroThreshold = 1.0e-6
+
+	dist := c.coord.DistanceTo(other).Seconds()
+	if rttSeconds < zeroThreshold {
+		rttSeconds = zeroThreshold
+	}
+	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+	totalError := c.coord.Error + other.Error
+	if totalError < zeroThreshold {
+		totalError = zeroThreshold
+	}
+	weight := c.coord.Error / totalError
+
+	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+	if c.coord.Error > c.config.VivaldiErrorMax {
+		c.coord.Error = c.config.VivaldiErrorMax
+	}
+
+	delta := c.config.VivaldiCC * weight
+	force := delta * (rttSeconds - dist)
+	c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+	if c.config.AdjustmentWindowSize == 0 {
+		return
+	}
+
+	// Note that the existing adjustment factors don't figure in to this
+	// calculation so we use the raw distance here.
+	dist := c.coord.rawDistanceTo(other)
+	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+	sum := 0.0
+	for _, sample := range c.adjustmentSamples {
+		sum += sample
+	}
+	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+	dist := c.origin.DistanceTo(c.coord).Seconds()
+	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if err := c.checkCoordinate(other); err != nil {
+		return nil, err
+	}
+
+	// The code down below can handle zero RTTs, which we have seen in
+	// https://github.com/hashicorp/consul/issues/3789, presumably in
+	// environments with coarse-grained monotonic clocks (we are still
+	// trying to pin this down). In any event, this is ok from a code PoV
+	// so we don't need to alert operators with spammy messages. We did
+	// add a counter so this is still observable, though.
+	const maxRTT = 10 * time.Second
+	if rtt < 0 || rtt > maxRTT {
+		return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v", rtt, maxRTT)
+	}
+	if rtt == 0 {
+		metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1)
+	}
+
+	rttSeconds := c.latencyFilter(node, rtt.Seconds())
+	c.updateVivaldi(other, rttSeconds)
+	c.updateAdjustment(other, rttSeconds)
+	c.updateGravity()
+	if !c.coord.IsValid() {
+		c.stats.Resets++
+		c.coord = NewCoordinate(c.config)
+	}
+
+	return c.coord.Clone(), nil
+}
+
+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.DistanceTo(other)
+}
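The `Client` above is driven entirely through `Update` and `DistanceTo`. A sketch of the intended call pattern, assuming the peer's coordinate would normally arrive via gossip (here it is fabricated locally at the origin):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	client, err := coordinate.NewClient(coordinate.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Pretend a ping to "node-a" took 10ms; in practice the peer's
	// coordinate is received over the network, not constructed here.
	other := coordinate.NewCoordinate(coordinate.DefaultConfig())
	if _, err := client.Update("node-a", other, 10*time.Millisecond); err != nil {
		log.Fatal(err)
	}

	fmt.Println("estimated RTT:", client.DistanceTo(other))
}
```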
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 0000000..b85a8ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+//     in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
+//     on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improves the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+	Dimensionality uint
+
+	// VivaldiErrorMax is the default error value when a node hasn't yet made
+	// any observations. It also serves as an upper limit on the error value in
+	// case observations cause the error value to increase without bound.
+	VivaldiErrorMax float64
+
+	// VivaldiCE is a tuning factor that controls the maximum impact an
+	// observation can have on a node's confidence. See [1] for more details.
+	VivaldiCE float64
+
+	// VivaldiCC is a tuning factor that controls the maximum impact an
+	// observation can have on a node's coordinate. See [1] for more details.
+	VivaldiCC float64
+
+	// AdjustmentWindowSize is a tuning factor that determines how many samples
+	// we retain to calculate the adjustment factor as discussed in [3]. Setting
+	// this to zero disables this feature.
+	AdjustmentWindowSize uint
+
+	// HeightMin is the minimum value of the height parameter. Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since our time to probe any given node is
+	// pretty infrequent. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that sets how much gravity has an effect
+	// to try to re-center coordinates. See [2] for more details.
+	GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
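Every knob lives on `Config`, so tuning is a matter of mutating the value `DefaultConfig` returns before passing it to `NewClient`; a small sketch:

```go
package main

import (
	"log"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	// Start from the defaults and override only what we need.
	config := coordinate.DefaultConfig()
	config.Dimensionality = 4       // fewer dimensions, cheaper per-update math
	config.AdjustmentWindowSize = 0 // zero disables the adjustment term from [3]

	if _, err := coordinate.NewClient(config); err != nil {
		log.Fatal(err)
	}
}
```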
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 0000000..fbe792c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,203 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Err reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is panicked if you try to perform operations
+// with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+	vec := make([]float64, len(c.Vec))
+	copy(vec, c.Vec)
+	return &Coordinate{
+		Vec:        vec,
+		Error:      c.Error,
+		Adjustment: c.Adjustment,
+		Height:     c.Height,
+	}
+}
+
+// componentIsValid returns false if a floating point value is a NaN or an
+// infinity.
+func componentIsValid(f float64) bool {
+	return !math.IsInf(f, 0) && !math.IsNaN(f)
+}
+
+// IsValid returns false if any component of a coordinate isn't valid, per the
+// componentIsValid() helper above.
+func (c *Coordinate) IsValid() bool {
+	for i := range c.Vec {
+		if !componentIsValid(c.Vec[i]) {
+			return false
+		}
+	}
+
+	return componentIsValid(c.Error) &&
+		componentIsValid(c.Adjustment) &&
+		componentIsValid(c.Height)
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+	return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
+	if !c.IsCompatibleWith(other) {
+		panic(DimensionalityConflictError{})
+	}
+
+	ret := c.Clone()
+	unit, mag := unitVectorAt(c.Vec, other.Vec)
+	ret.Vec = add(ret.Vec, mul(unit, force))
+	if mag > zeroThreshold {
+		ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
+		ret.Height = math.Max(ret.Height, config.HeightMin)
+	}
+	return ret
+}
+
+// DistanceTo returns the distance between this coordinate and the other
+// coordinate, including adjustments.
+func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
+	if !c.IsCompatibleWith(other) {
+		panic(DimensionalityConflictError{})
+	}
+
+	dist := c.rawDistanceTo(other)
+	adjustedDist := dist + c.Adjustment + other.Adjustment
+	if adjustedDist > 0.0 {
+		dist = adjustedDist
+	}
+	return time.Duration(dist * secondsToNanoseconds)
+}
+
+// rawDistanceTo returns the Vivaldi distance between this coordinate and the
+// other coordinate in seconds, not including adjustments. This assumes the
+// dimensions have already been checked to be compatible.
+func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
+	return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
+}
+
+// add returns the sum of vec1 and vec2. This assumes the dimensions have
+// already been checked to be compatible.
+func add(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] + vec2[i]
+	}
+	return ret
+}
+
+// diff returns the difference between the vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
+func diff(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] - vec2[i]
+	}
+	return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+	ret := make([]float64, len(vec))
+	for i := range vec {
+		ret[i] = vec[i] * factor
+	}
+	return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+	sum := 0.0
+	for i := range vec {
+		sum += vec[i] * vec[i]
+	}
+	return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+	ret := diff(vec1, vec2)
+
+	// If the coordinates aren't on top of each other we can normalize.
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), mag
+	}
+
+	// Otherwise, just return a random unit vector.
+	for i := range ret {
+		ret[i] = rand.Float64() - 0.5
+	}
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), 0.0
+	}
+
+	// And finally just give up and make a unit vector along the first
+	// dimension. This should be exceedingly rare.
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
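A tiny sketch of the arithmetic above, showing how the height term keeps coincident points from reporting a zero RTT and how `ApplyForce` moves a point:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()
	a := coordinate.NewCoordinate(config)
	b := coordinate.NewCoordinate(config)

	// Both points start at the origin, so the distance is just the two
	// height terms (the adjustment terms start at zero).
	fmt.Println("initial distance:", a.DistanceTo(b))

	// A positive force pushes a away from b; since the points currently
	// coincide, the direction is a random unit vector.
	a = a.ApplyForce(config, 0.005, b)
	fmt.Println("after force:     ", a.DistanceTo(b))
}
```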
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 0000000..6fb033c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	split := nodes / 2
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := lan
+			if (i <= split && j > split) || (i > split && j <= split) {
+				rtt += wan
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			var rtt time.Duration
+			if i == 0 {
+				rtt = 2 * radius
+			} else {
+				t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
+				x1, y1 := math.Cos(t1), math.Sin(t1)
+				t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
+				x2, y2 := math.Cos(t2), math.Sin(t2)
+				dx, dy := x2-x1, y2-y1
+				dist := math.Sqrt(dx*dx + dy*dy)
+				rtt = time.Duration(dist * float64(radius))
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateRandom returns a truth matrix for a set of nodes with normally
+// distributed delays, with the given mean and deviation. The RNG is re-seeded
+// so you always get the same matrix for a given size.
+func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
+	rand.Seed(1)
+
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
+			rtt := time.Duration(rttSeconds * secondsToNanoseconds)
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// Simulate runs the given number of cycles using the given list of clients and
+// truth matrix. On each cycle, each client will pick a random node and observe
+// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
+// each simulation run to get deterministic results (for this algorithm and the
+// underlying algorithm which will use random numbers for position vectors when
+// starting out with everything at the origin).
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
+	rand.Seed(1)
+
+	nodes := len(clients)
+	for cycle := 0; cycle < cycles; cycle++ {
+		for i := range clients {
+			if j := rand.Intn(nodes); j != i {
+				c := clients[j].GetCoordinate()
+				rtt := truth[i][j]
+				node := fmt.Sprintf("node_%d", j)
+				clients[i].Update(node, c, rtt)
+			}
+		}
+	}
+}
+
+// Stats is returned from the Evaluate function with a summary of the algorithm
+// performance.
+type Stats struct {
+	ErrorMax float64
+	ErrorAvg float64
+}
+
+// Evaluate uses the coordinates of the given clients to calculate estimated
+// distances and compares them with the given truth matrix, returning summary
+// stats.
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
+	nodes := len(clients)
+	count := 0
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
+			actual := truth[i][j].Seconds()
+			relErr := math.Abs(est-actual) / actual
+			stats.ErrorMax = math.Max(stats.ErrorMax, relErr)
+			stats.ErrorAvg += relErr
+			count++
+		}
+	}
+
+	stats.ErrorAvg /= float64(count)
+	fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+	return
+}
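These helpers compose into a complete offline experiment; a sketch:

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()
	clients, err := coordinate.GenerateClients(10, config)
	if err != nil {
		log.Fatal(err)
	}

	// Ten nodes in a line, 10ms apart, simulated for 1000 cycles.
	truth := coordinate.GenerateLine(10, 10*time.Millisecond)
	coordinate.Simulate(clients, truth, 1000)

	// Evaluate prints the average and maximum relative error of the
	// estimated distances against the truth matrix.
	coordinate.Evaluate(clients, truth)
}
```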
diff --git a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright
new file mode 100644
index 0000000..21a1a1b
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright
@@ -0,0 +1,2 @@
+Name: serf
+Copyright: Hashicorp 2013
diff --git a/vendor/github.com/hashicorp/serf/website/LICENSE.md b/vendor/github.com/hashicorp/serf/website/LICENSE.md
new file mode 100644
index 0000000..3189f43
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/website/LICENSE.md
@@ -0,0 +1,10 @@
+# Proprietary License
+
+This license is temporary while a more official one is drafted. However,
+this should make it clear:
+
+The text contents of this website are MPL 2.0 licensed.
+
+The design contents of this website are proprietary and may not be reproduced
+or reused in any way other than to run the website locally. The license for
+the design is owned solely by HashiCorp, Inc.
diff --git a/vendor/github.com/hashicorp/serf/website/source/LICENSE b/vendor/github.com/hashicorp/serf/website/source/LICENSE
new file mode 100644
index 0000000..36c29d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/website/source/LICENSE
@@ -0,0 +1,10 @@
+# Proprietary License
+
+This license is temporary while a more official one is drafted. However,
+this should make it clear:
+
+* The text contents of this website are MPL 2.0 licensed.
+
+* The design contents of this website are proprietary and may not be reproduced
+  or reused in any way other than to run the Serf website locally. The license
+  for the design is owned solely by HashiCorp, Inc.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 0000000..14127cd
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 0000000..949b77e
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,40 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go programs.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+	"syscall"
+	
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 0000000..716c613
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 0000000..ef18d8f
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
+	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+	var mode uint32
+	err := syscall.GetConsoleMode(stream, &mode)
+	if err != nil {
+		return err
+	}
+
+	if enable {
+		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	} else {
+		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	}
+
+	ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
+	if ret == 0 {
+		return err
+	}
+
+	return nil
+}
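A slightly fuller version of the README usage, with the error handled; this is Windows-only, and the ANSI color sequence is just an illustrative assumption:

```go
// +build windows

package main

import (
	"fmt"
	"syscall"

	sequences "github.com/konsorten/go-windows-terminal-sequences"
)

func main() {
	// Opt stdout in to VT processing, then emit a raw ANSI color sequence.
	if err := sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true); err != nil {
		fmt.Println("no VT support:", err)
		return
	}
	fmt.Println("\x1b[32mgreen text\x1b[0m")
}
```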
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 0000000..d70706d
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
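A minimal sketch of the two calls the README describes (the config filename is a hypothetical placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/go-homedir"
)

func main() {
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand replaces a leading ~ with the detected home directory.
	path, err := homedir.Expand("~/.myapprc") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("config:", path)
}
```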
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod
new file mode 100644
index 0000000..7efa09a
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/go.mod
@@ -0,0 +1 @@
+module github.com/mitchellh/go-homedir
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 0000000..fb87bef
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,157 @@
+package homedir
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+	if !DisableCache {
+		cacheLock.RLock()
+		cached := homedirCache
+		cacheLock.RUnlock()
+		if cached != "" {
+			return cached, nil
+		}
+	}
+
+	cacheLock.Lock()
+	defer cacheLock.Unlock()
+
+	var result string
+	var err error
+	if runtime.GOOS == "windows" {
+		result, err = dirWindows()
+	} else {
+		// Unix-like system, so just assume Unix
+		result, err = dirUnix()
+	}
+
+	if err != nil {
+		return "", err
+	}
+	homedirCache = result
+	return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+	if len(path) == 0 {
+		return path, nil
+	}
+
+	if path[0] != '~' {
+		return path, nil
+	}
+
+	if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+		return "", errors.New("cannot expand user-specific home dir")
+	}
+
+	dir, err := Dir()
+	if err != nil {
+		return "", err
+	}
+
+	return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+	homeEnv := "HOME"
+	if runtime.GOOS == "plan9" {
+		// On plan9, env vars are lowercase.
+		homeEnv = "home"
+	}
+
+	// First prefer the HOME environment variable
+	if home := os.Getenv(homeEnv); home != "" {
+		return home, nil
+	}
+
+	var stdout bytes.Buffer
+
+	// If that fails, try OS specific commands
+	if runtime.GOOS == "darwin" {
+		cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
+		cmd.Stdout = &stdout
+		if err := cmd.Run(); err == nil {
+			result := strings.TrimSpace(stdout.String())
+			if result != "" {
+				return result, nil
+			}
+		}
+	} else {
+		cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+		cmd.Stdout = &stdout
+		if err := cmd.Run(); err != nil {
+			// If the error is ErrNotFound, we ignore it. Otherwise, return it.
+			if err != exec.ErrNotFound {
+				return "", err
+			}
+		} else {
+			if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+				// username:password:uid:gid:gecos:home:shell
+				passwdParts := strings.SplitN(passwd, ":", 7)
+				if len(passwdParts) > 5 {
+					return passwdParts[5], nil
+				}
+			}
+		}
+	}
+
+	// If all else fails, try the shell
+	stdout.Reset()
+	cmd := exec.Command("sh", "-c", "cd && pwd")
+	cmd.Stdout = &stdout
+	if err := cmd.Run(); err != nil {
+		return "", err
+	}
+
+	result := strings.TrimSpace(stdout.String())
+	if result == "" {
+		return "", errors.New("blank output when reading home directory")
+	}
+
+	return result, nil
+}
+
+func dirWindows() (string, error) {
+	// First prefer the HOME environment variable
+	if home := os.Getenv("HOME"); home != "" {
+		return home, nil
+	}
+
+	// Prefer standard environment variable USERPROFILE
+	if home := os.Getenv("USERPROFILE"); home != "" {
+		return home, nil
+	}
+
+	drive := os.Getenv("HOMEDRIVE")
+	path := os.Getenv("HOMEPATH")
+	home := drive + path
+	if drive == "" || path == "" {
+		return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
+	}
+
+	return home, nil
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
new file mode 100644
index 0000000..d9deadb
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+  - 1.9.x
+  - tip
+
+script:
+  - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
new file mode 100644
index 0000000..fb0f46c
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
@@ -0,0 +1,12 @@
+## 1.1.0 (September 30, 2018)
+
+* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
+* Support struct to struct decoding [GH-137]
+* If source map value is nil, then destination map value is nil (instead of empty)
+* If source slice value is nil, then destination slice value is nil (instead of empty)
+* If source pointer is nil, then destination pointer is set to nil (instead of
+  allocated zero value of type)
+
+## 1.0.0
+
+* Initial tagged stable release.
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 0000000..0018dc7
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
+# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+  "type": "person",
+  "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
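
To make the decode-dispatch flow above concrete, here is a small sketch; the `Person` struct and the dispatch on the "type" key are illustrative, not part of the library:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Person struct {
	Name string
}

func main() {
	input := []byte(`{"type": "person", "name": "Mitchell"}`)

	// Phase 1: decode into a generic map and read the "type" key.
	var raw map[string]interface{}
	if err := json.Unmarshal(input, &raw); err != nil {
		log.Fatal(err)
	}

	// Phase 2: dispatch on "type" and decode into the concrete struct.
	switch raw["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			log.Fatal(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```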
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 0000000..1f0abc6
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,217 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+	// Create variables here so we can reference them with the reflect pkg
+	var f1 DecodeHookFuncType
+	var f2 DecodeHookFuncKind
+
+	// Put the variables into this slice and the rest is done
+	// automatically using the reflect package.
+	potential := []interface{}{f1, f2}
+
+	v := reflect.ValueOf(h)
+	vt := v.Type()
+	for _, raw := range potential {
+		pt := reflect.ValueOf(raw).Type()
+		if vt.ConvertibleTo(pt) {
+			return v.Convert(pt).Interface()
+		}
+	}
+
+	return nil
+}
+
+// DecodeHookExec executes the given decode hook. It should be used rather than
+// calling the hook directly, since it transparently supports both DecodeHookFuncType
+// and the older backwards-compatible form that takes reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from kind to be correct with the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Duration(5)) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.ParseDuration(data.(string))
+	}
+}
+
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(net.IP{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		ip := net.ParseIP(data.(string))
+		if ip == nil {
+			return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+		}
+
+		return ip, nil
+	}
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(net.IPNet{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		_, net, err := net.ParseCIDR(data.(string))
+		return net, err
+	}
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Time{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.Parse(layout, data.(string))
+	}
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			}
+			return "0", nil
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}
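
A sketch of composing the hooks above through a decoder's `DecodeHook`; the `Config` struct and the input values are assumptions made for illustration:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	input := map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "red,green,blue",
	}

	var out Config
	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Hooks run in order; each sees the result of the previous one.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := decoder.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Timeout, out.Tags) // 1m30s [red green blue]
}
```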
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 0000000..47a99e5
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod
new file mode 100644
index 0000000..d2a7125
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/go.mod
@@ -0,0 +1 @@
+module github.com/mitchellh/mapstructure
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 0000000..95eb686
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,1140 @@
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	//   - bools to string (true = "1", false = "0")
+	//   - numbers to string (base 10)
+	//   - bools to int/uint (true = 1, false = 0)
+	//   - strings to int/uint (base implied by prefix)
+	//   - int to bool (true if value != 0)
+	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//     FALSE, false, False. Anything else is an error)
+	//   - empty array = empty map and vice versa
+	//   - negative numbers to overflowed uint values (base 10)
+	//   - slice of maps to a merged map
+	//   - single values are converted to slices if required. Each
+	//     element is weakly decoded. For example: "4" can become []int{4}
+	//     if the target type is an int slice.
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names. This
+	// defaults to "mapstructure"
+	TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+	config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+	// Keys are the keys of the structure which were successfully decoded
+	Keys []string
+
+	// Unused is a slice of keys that were found in the raw value but
+	// weren't decoded since there was no matching field in the result interface
+	Unused []string
+}
+
+// Decode takes an input structure and uses reflection to translate it to
+// the output structure. output must be a pointer to a map or struct.
+func Decode(input interface{}, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata: nil,
+		Result:   output,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// DecodeMetadata is the same as Decode, but is shorthand to
+// enable metadata collection. See DecoderConfig for more info.
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+	config := &DecoderConfig{
+		Metadata: metadata,
+		Result:   output,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// WeakDecodeMetadata is the same as Decode, but is shorthand to
+// enable both WeaklyTypedInput and metadata collection. See
+// DecoderConfig for more info.
+func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
+	config := &DecoderConfig{
+		Metadata:         metadata,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+	val := reflect.ValueOf(config.Result)
+	if val.Kind() != reflect.Ptr {
+		return nil, errors.New("result must be a pointer")
+	}
+
+	val = val.Elem()
+	if !val.CanAddr() {
+		return nil, errors.New("result must be addressable (a pointer)")
+	}
+
+	if config.Metadata != nil {
+		if config.Metadata.Keys == nil {
+			config.Metadata.Keys = make([]string, 0)
+		}
+
+		if config.Metadata.Unused == nil {
+			config.Metadata.Unused = make([]string, 0)
+		}
+	}
+
+	if config.TagName == "" {
+		config.TagName = "mapstructure"
+	}
+
+	result := &Decoder{
+		config: config,
+	}
+
+	return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(input interface{}) error {
+	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
+	var inputVal reflect.Value
+	if input != nil {
+		inputVal = reflect.ValueOf(input)
+
+		// We need to check here if input is a typed nil. Typed nils won't
+		// match the "input == nil" below so we check that here.
+		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
+			input = nil
+		}
+	}
+
+	if input == nil {
+		// If the data is nil, then we don't set anything, unless ZeroFields is set
+		// to true.
+		if d.config.ZeroFields {
+			outVal.Set(reflect.Zero(outVal.Type()))
+
+			if d.config.Metadata != nil && name != "" {
+				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+			}
+		}
+		return nil
+	}
+
+	if !inputVal.IsValid() {
+		// If the input value is invalid, then we just set the value
+		// to be the zero value.
+		outVal.Set(reflect.Zero(outVal.Type()))
+		if d.config.Metadata != nil && name != "" {
+			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+		}
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the input.
+		var err error
+		input, err = DecodeHookExec(
+			d.config.DecodeHook,
+			inputVal.Type(), outVal.Type(), input)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	outputKind := getKind(outVal)
+	switch outputKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, input, outVal)
+	case reflect.Interface:
+		err = d.decodeBasic(name, input, outVal)
+	case reflect.String:
+		err = d.decodeString(name, input, outVal)
+	case reflect.Int:
+		err = d.decodeInt(name, input, outVal)
+	case reflect.Uint:
+		err = d.decodeUint(name, input, outVal)
+	case reflect.Float32:
+		err = d.decodeFloat(name, input, outVal)
+	case reflect.Struct:
+		err = d.decodeStruct(name, input, outVal)
+	case reflect.Map:
+		err = d.decodeMap(name, input, outVal)
+	case reflect.Ptr:
+		err = d.decodePtr(name, input, outVal)
+	case reflect.Slice:
+		err = d.decodeSlice(name, input, outVal)
+	case reflect.Array:
+		err = d.decodeArray(name, input, outVal)
+	case reflect.Func:
+		err = d.decodeFunc(name, input, outVal)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	if val.IsValid() && val.Elem().IsValid() {
+		return d.decode(name, data, val.Elem())
+	}
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
+		dataKind == reflect.Array && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch elemKind {
+		case reflect.Uint8:
+			var uints []uint8
+			if dataKind == reflect.Array {
+				uints = make([]uint8, dataVal.Len(), dataVal.Len())
+				for i := range uints {
+					uints[i] = dataVal.Index(i).Interface().(uint8)
+				}
+			} else {
+				uints = dataVal.Interface().([]uint8)
+			}
+			val.SetString(string(uints))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+	case dataKind == reflect.Uint:
+		val.SetInt(int64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetInt(int64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetInt(1)
+		} else {
+			val.SetInt(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetInt(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Int64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetInt(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Int:
+		i := dataVal.Int()
+		if i < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %d overflows uint",
+				name, i)
+		}
+		val.SetUint(uint64(i))
+	case dataKind == reflect.Uint:
+		val.SetUint(dataVal.Uint())
+	case dataKind == reflect.Float32:
+		f := dataVal.Float()
+		if f < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %f overflows uint",
+				name, f)
+		}
+		val.SetUint(uint64(f))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetUint(1)
+		} else {
+			val.SetUint(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetUint(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Bool:
+		val.SetBool(dataVal.Bool())
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Int() != 0)
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Uint() != 0)
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Float() != 0)
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		b, err := strconv.ParseBool(dataVal.String())
+		if err == nil {
+			val.SetBool(b)
+		} else if dataVal.String() == "" {
+			val.SetBool(false)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetFloat(float64(dataVal.Int()))
+	case dataKind == reflect.Uint:
+		val.SetFloat(float64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetFloat(dataVal.Float())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetFloat(1)
+		} else {
+			val.SetFloat(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+		if err == nil {
+			val.SetFloat(f)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Float64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetFloat(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// By default we overwrite keys in the current map
+	valMap := val
+
+	// If the map is nil or we're purposely zeroing fields, make a new map
+	if valMap.IsNil() || d.config.ZeroFields {
+		// Make a new map to hold our result
+		mapType := reflect.MapOf(valKeyType, valElemType)
+		valMap = reflect.MakeMap(mapType)
+	}
+
+	// Check input type and based on the input type jump to the proper func
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	switch dataVal.Kind() {
+	case reflect.Map:
+		return d.decodeMapFromMap(name, dataVal, val, valMap)
+
+	case reflect.Struct:
+		return d.decodeMapFromStruct(name, dataVal, val, valMap)
+
+	case reflect.Array, reflect.Slice:
+		if d.config.WeaklyTypedInput {
+			return d.decodeMapFromSlice(name, dataVal, val, valMap)
+		}
+
+		fallthrough
+
+	default:
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+}
+
+func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	// Special case for BC reasons (covered by tests)
+	if dataVal.Len() == 0 {
+		val.Set(valMap)
+		return nil
+	}
+
+	for i := 0; i < dataVal.Len(); i++ {
+		err := d.decode(
+			fmt.Sprintf("%s[%d]", name, i),
+			dataVal.Index(i).Interface(), val)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// Accumulate errors
+	errors := make([]string, 0)
+
+	// If the input data is empty, then we mirror the input: nil stays nil and an empty map yields an empty map.
+	if dataVal.Len() == 0 {
+		if dataVal.IsNil() {
+			if !val.IsNil() {
+				val.Set(dataVal)
+			}
+		} else {
+			// Set to empty allocated value
+			val.Set(valMap)
+		}
+
+		return nil
+	}
+
+	for _, k := range dataVal.MapKeys() {
+		fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+		// First decode the key into the proper type
+		currentKey := reflect.Indirect(reflect.New(valKeyType))
+		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		// Next decode the data into the proper type
+		v := dataVal.MapIndex(k).Interface()
+		currentVal := reflect.Indirect(reflect.New(valElemType))
+		if err := d.decode(fieldName, v, currentVal); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		valMap.SetMapIndex(currentKey, currentVal)
+	}
+
+	// Set the built up map to the value
+	val.Set(valMap)
+
+	// If we had errors, return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
+	typ := dataVal.Type()
+	for i := 0; i < typ.NumField(); i++ {
+		// Get the StructField first since this is a cheap operation. If the
+		// field is unexported, then ignore it.
+		f := typ.Field(i)
+		if f.PkgPath != "" {
+			continue
+		}
+
+		// Next get the actual value of this field and verify it is assignable
+		// to the map value.
+		v := dataVal.Field(i)
+		if !v.Type().AssignableTo(valMap.Type().Elem()) {
+			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
+		}
+
+		tagValue := f.Tag.Get(d.config.TagName)
+		tagParts := strings.Split(tagValue, ",")
+
+		// Determine the name of the key in the map
+		keyName := f.Name
+		if tagParts[0] != "" {
+			if tagParts[0] == "-" {
+				continue
+			}
+			keyName = tagParts[0]
+		}
+
+		// If "squash" is specified in the tag, we squash the field down.
+		squash := false
+		for _, tag := range tagParts[1:] {
+			if tag == "squash" {
+				squash = true
+				break
+			}
+		}
+		if squash && v.Kind() != reflect.Struct {
+			return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
+		}
+
+		switch v.Kind() {
+		// this is an embedded struct, so handle it differently
+		case reflect.Struct:
+			x := reflect.New(v.Type())
+			x.Elem().Set(v)
+
+			vType := valMap.Type()
+			vKeyType := vType.Key()
+			vElemType := vType.Elem()
+			mType := reflect.MapOf(vKeyType, vElemType)
+			vMap := reflect.MakeMap(mType)
+
+			err := d.decode(keyName, x.Interface(), vMap)
+			if err != nil {
+				return err
+			}
+
+			if squash {
+				for _, k := range vMap.MapKeys() {
+					valMap.SetMapIndex(k, vMap.MapIndex(k))
+				}
+			} else {
+				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
+			}
+
+		default:
+			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
+		}
+	}
+
+	if val.CanAddr() {
+		val.Set(valMap)
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+	// If the input data is nil, then we want to just set the output
+	// pointer to be nil as well.
+	isNil := data == nil
+	if !isNil {
+		switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
+		case reflect.Chan,
+			reflect.Func,
+			reflect.Interface,
+			reflect.Map,
+			reflect.Ptr,
+			reflect.Slice:
+			isNil = v.IsNil()
+		}
+	}
+	if isNil {
+		if !val.IsNil() && val.CanSet() {
+			nilValue := reflect.New(val.Type()).Elem()
+			val.Set(nilValue)
+		}
+
+		return nil
+	}
+
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+	if val.CanSet() {
+		realVal := val
+		if realVal.IsNil() || d.config.ZeroFields {
+			realVal = reflect.New(valElemType)
+		}
+
+		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+			return err
+		}
+
+		val.Set(realVal)
+	} else {
+		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if val.Type() != dataVal.Type() {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	sliceType := reflect.SliceOf(valElemType)
+
+	valSlice := val
+	if valSlice.IsNil() || d.config.ZeroFields {
+		if d.config.WeaklyTypedInput {
+			switch {
+			// Slice and array we use the normal logic
+			case dataValKind == reflect.Slice, dataValKind == reflect.Array:
+				break
+
+			// Empty maps turn into empty slices
+			case dataValKind == reflect.Map:
+				if dataVal.Len() == 0 {
+					val.Set(reflect.MakeSlice(sliceType, 0, 0))
+					return nil
+				}
+				// Create slice of maps of other sizes
+				return d.decodeSlice(name, []interface{}{data}, val)
+
+			case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
+				return d.decodeSlice(name, []byte(dataVal.String()), val)
+
+			// All other types we try to convert to the slice type
+			// and "lift" it into it. i.e. a string becomes a string slice.
+			default:
+				// Just re-try this function with data as a slice.
+				return d.decodeSlice(name, []interface{}{data}, val)
+			}
+		}
+
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+
+		// If the input value is empty, then don't allocate since non-nil != nil
+		if dataVal.Len() == 0 {
+			return nil
+		}
+
+		// Make a new slice to hold our result, same size as the original data.
+		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		for valSlice.Len() <= i {
+			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+		}
+		currentField := valSlice.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the slice we built up
+	val.Set(valSlice)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	arrayType := reflect.ArrayOf(valType.Len(), valElemType)
+
+	valArray := val
+
+	if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			if d.config.WeaklyTypedInput {
+				switch {
+				// Empty maps turn into empty arrays
+				case dataValKind == reflect.Map:
+					if dataVal.Len() == 0 {
+						val.Set(reflect.Zero(arrayType))
+						return nil
+					}
+
+				// All other types we try to convert to the array type
+				// and "lift" it into it. i.e. a string becomes a string array.
+				default:
+					// Just re-try this function with data as a slice.
+					return d.decodeArray(name, []interface{}{data}, val)
+				}
+			}
+
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+		if dataVal.Len() > arrayType.Len() {
+			return fmt.Errorf(
+				"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
+
+		}
+
+		// Make a new array to hold our result, same size as the original data.
+		valArray = reflect.New(arrayType).Elem()
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		currentField := valArray.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the array we built up
+	val.Set(valArray)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+	// If the type of the value to write to and the data match directly,
+	// then we just set it directly instead of recursing into the structure.
+	if dataVal.Type() == val.Type() {
+		val.Set(dataVal)
+		return nil
+	}
+
+	dataValKind := dataVal.Kind()
+	switch dataValKind {
+	case reflect.Map:
+		return d.decodeStructFromMap(name, dataVal, val)
+
+	case reflect.Struct:
+		// Not the most efficient way to do this but we can optimize later if
+		// we want to. To convert from struct to struct we go to map first
+		// as an intermediary.
+		m := make(map[string]interface{})
+		mval := reflect.Indirect(reflect.ValueOf(&m))
+		if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
+			return err
+		}
+
+		result := d.decodeStructFromMap(name, mval, val)
+		return result
+
+	default:
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+}
+
+func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return fmt.Errorf(
+			"'%s' needs a map with string keys, has '%s' keys",
+			name, dataValType.Key().Kind())
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[interface{}]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	errors := make([]string, 0)
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldKind := fieldType.Type.Kind()
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash := false
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == "squash" {
+					squash = true
+					break
+				}
+			}
+
+			if squash {
+				if fieldKind != reflect.Struct {
+					errors = appendErrors(errors,
+						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+				} else {
+					structs = append(structs, structVal.FieldByName(fieldType.Name))
+				}
+				continue
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	// for fieldType, field := range fields {
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(d.config.TagName)
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing a case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if strings.EqualFold(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Just ignore.
+				continue
+			}
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		// If the name is empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+		keys := make([]string, 0, len(dataValKeysUnused))
+		for rawKey := range dataValKeysUnused {
+			keys = append(keys, rawKey.(string))
+		}
+		sort.Strings(keys)
+
+		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+		errors = appendErrors(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	// Add the unused keys to the list of unused keys if we're tracking metadata
+	if d.config.Metadata != nil {
+		for rawKey := range dataValKeysUnused {
+			key := rawKey.(string)
+			if name != "" {
+				key = fmt.Sprintf("%s.%s", name, key)
+			}
+
+			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+		}
+	}
+
+	return nil
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+	kind := val.Kind()
+
+	switch {
+	case kind >= reflect.Int && kind <= reflect.Int64:
+		return reflect.Int
+	case kind >= reflect.Uint && kind <= reflect.Uint64:
+		return reflect.Uint
+	case kind >= reflect.Float32 && kind <= reflect.Float64:
+		return reflect.Float32
+	default:
+		return kind
+	}
+}
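
To illustrate the weak conversions listed in the `DecoderConfig` documentation above, a short sketch using `WeakDecode`; the `Flags` struct and input values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Flags struct {
	Verbose bool
	Retries int
	Labels  []string
}

func main() {
	// Weakly typed input: scalars and strings are coerced per the
	// WeaklyTypedInput rules documented in DecoderConfig.
	input := map[string]interface{}{
		"verbose": "true",     // string -> bool
		"retries": "3",        // string -> int (base implied by prefix)
		"labels":  "critical", // single value -> one-element slice
	}

	var f Flags
	if err := mapstructure.WeakDecode(input, &f); err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Verbose, f.Retries, f.Labels) // true 3 [critical]
}
```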
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
new file mode 100644
index 0000000..e48bab3
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -0,0 +1,33 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+lz4c/lz4c
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
new file mode 100644
index 0000000..204afe1
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+
+env:
+  - GO111MODULE=off
+  - GO111MODULE=on
+
+go:
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - master
+
+matrix:
+ fast_finish: true
+ allow_failures:
+   - go: master
+
+sudo: false
+
+script: 
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
new file mode 100644
index 0000000..bd899d8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 0000000..e71ebd5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,24 @@
+[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4)
+
+# lz4
+LZ4 compression and decompression in pure Go.
+
+## Usage
+
+```go
+import "github.com/pierrec/lz4/v2"
+```
+
+## Description
+Package lz4 implements reading and writing lz4 compressed data (a frame),
+as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
+
+This package is **compatible with the LZ4 frame format**. Although the block-level compression
+and decompression functions are exposed and are fully compatible with the lz4 block format
+definition, they are low level and should not be used directly.
+
+For a complete description of an lz4 compressed block, see:
+http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+
+See https://github.com/Cyan4973/lz4 for the reference C implementation.
+
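
The README recommends the frame format for general use, but purely to illustrate the block functions defined in `block.go` below, here is a round-trip sketch; the sample data is illustrative, and the import path matches this vendored copy rather than the `/v2` path shown above:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	data := []byte("hello hello hello hello hello hello")

	// CompressBlockBound gives the worst-case output size (n + n/255 + 16).
	dst := make([]byte, lz4.CompressBlockBound(len(data)))
	ht := make([]int, 64<<10) // hash table required by the fast compressor

	n, err := lz4.CompressBlock(data, dst, ht)
	if err != nil {
		log.Fatal(err)
	}
	if n == 0 {
		log.Fatal("data was not compressible")
	}

	// The destination must already be sized for the uncompressed data.
	out := make([]byte, len(data))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m == len(data), bytes.Equal(out[:m], data)) // true true
}
```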
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 0000000..00b1111
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,397 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+var (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
+	// ErrInvalid is returned when reading an invalid LZ4 archive.
+	ErrInvalid = errors.New("lz4: bad magic number")
+)
+
+// blockHash hashes 4 bytes into a value < winSize.
+func blockHash(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> hashShift
+}
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n when the data is not compressible.
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (si int, err error) {
+	defer func() {
+		// It is now faster to let the runtime panic and recover on out-of-bounds slice access
+		// than checking indices as we go along.
+		if recover() != nil {
+			err = ErrInvalidSourceShortBuffer
+		}
+	}()
+	sn := len(src)
+	if sn == 0 {
+		return 0, nil
+	}
+	var di int
+
+	for {
+		// Literals and match lengths (token).
+		b := int(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			if lLen == 0xF {
+				for src[si] == 0xFF {
+					lLen += 0xFF
+					si++
+				}
+				lLen += int(src[si])
+				si++
+			}
+			i := si
+			si += lLen
+			di += copy(dst[di:], src[i:si])
+
+			if si >= sn {
+				return di, nil
+			}
+		}
+
+		si++
+		_ = src[si] // Bound check elimination.
+		offset := int(src[si-1]) | int(src[si])<<8
+		si++
+
+		// Match.
+		mLen := b & 0xF
+		if mLen == 0xF {
+			for src[si] == 0xFF {
+				mLen += 0xFF
+				si++
+			}
+			mLen += int(src[si])
+			si++
+		}
+		mLen += minMatch
+
+		// Copy the match.
+		i := di - offset
+		if offset > 0 && mLen >= offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
+			bytesToCopy := offset * (mLen / offset)
+			expanded := dst[i:]
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += copy(dst[di:], dst[i:i+mLen])
+	}
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+// The size of hashTable must be at least 64Kb.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) {
+	defer func() {
+		if recover() != nil {
+			err = ErrInvalidSourceShortBuffer
+		}
+	}()
+
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 {
+		return 0, nil
+	}
+	var si int
+
+	// Fast scan strategy: the hash table only stores the position of the last occurrence of each 4-byte sequence.
+	// const accInit = 1 << skipStrength
+
+	anchor := si // Position of the current literals.
+	// acc := accInit // Variable step: improves performance on non-compressible data.
+
+	for si < sn {
+		// Hash the next 4 bytes (sequence)...
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHash(match)
+
+		ref := hashTable[h]
+		hashTable[h] = si
+		if ref >= sn { // Invalid reference (dirty hashtable).
+			si++
+			continue
+		}
+		offset := si - ref
+		if offset <= 0 || offset >= winSize || // Out of window.
+			match != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
+			// si += acc >> skipStrength
+			// acc++
+			si++
+			continue
+		}
+
+		// Match found.
+		// acc = accInit
+		lLen := si - anchor // Literal length.
+
+		// Encode match length part 1.
+		si += minMatch
+		mLen := si // Match length has minMatch already.
+		// Find the longest match, first looking by batches of 8 bytes.
+		for si < sn && binary.LittleEndian.Uint64(src[si:]) == binary.LittleEndian.Uint64(src[si-offset:]) {
+			si += 8
+		}
+		// Then byte by byte.
+		for si < sn && src[si] == src[si-offset] {
+			si++
+		}
+
+		mLen = si - mLen
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:], src[anchor:anchor+lLen])
+		di += lLen + 2
+		anchor = si
+
+		// Encode offset.
+		_ = dst[di] // Bound check elimination.
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:], src[anchor:])
+	return di, nil
+}
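+
+// Usage sketch (data and buf are placeholders; the table may be reused
+// across calls, which is what Writer does internally):
+//
+//	var hashTable [winSize]int
+//	buf := make([]byte, len(data)) // a dst the size of the source is sufficient
+//	n, err := CompressBlock(data, buf, hashTable[:])
+//	// n == 0 with a nil error means the input is incompressible.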
+
+// CompressBlockHC compresses the source buffer src into the destination dst
+// with max search depth (use 0 or negative value for no max).
+//
+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
+	defer func() {
+		if recover() != nil {
+			err = ErrInvalidSourceShortBuffer
+		}
+	}()
+
+	sn, dn := len(src)-mfLimit, len(dst)
+	if sn <= 0 || dn == 0 {
+		return 0, nil
+	}
+	var si int
+
+	// hashTable: stores the last position found for a given hash.
+	// chainTable: stores previous positions for the same hash.
+	var hashTable, chainTable [winSize]int
+
+	if depth <= 0 {
+		depth = winSize
+	}
+
+	anchor := si
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHash(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
+			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+			// must match to improve on the match length.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous with the same hash.
+			for ml < sn-si && binary.LittleEndian.Uint64(src[next+ml:]) == binary.LittleEndian.Uint64(src[si+ml:]) {
+				ml += 8
+			}
+			for ml < sn-si && src[next+ml] == src[si+ml] {
+				ml++
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or smaller than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+			try--
+		}
+		chainTable[si&winMask] = hashTable[h]
+		hashTable[h] = si
+
+		// No match found.
+		if mLen == 0 {
+			si++
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHash(match)
+			chainTable[si&winMask] = hashTable[h]
+			hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:], src[anchor:])
+	return di, nil
+}
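+
+// Sketch: the depth parameter bounds the chain walk per position, trading
+// ratio for speed (16 here is an arbitrary example value):
+//
+//	n, err := CompressBlockHC(data, buf, 16)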
diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
new file mode 100644
index 0000000..bc5e78d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/debug.go
@@ -0,0 +1,23 @@
+// +build lz4debug
+
+package lz4
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+const debugFlag = true
+
+func debug(args ...interface{}) {
+	_, file, line, _ := runtime.Caller(1)
+	file = filepath.Base(file)
+
+	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
+	if f[len(f)-1] != '\n' {
+		f += "\n"
+	}
+	fmt.Fprintf(os.Stderr, f, args[1:]...)
+}
diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
new file mode 100644
index 0000000..44211ad
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/debug_stub.go
@@ -0,0 +1,7 @@
+// +build !lz4debug
+
+package lz4
+
+const debugFlag = false
+
+func debug(args ...interface{}) {}
diff --git a/vendor/github.com/pierrec/lz4/go.mod b/vendor/github.com/pierrec/lz4/go.mod
new file mode 100644
index 0000000..f9f570a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/go.mod
@@ -0,0 +1,3 @@
+module github.com/pierrec/lz4
+
+require github.com/pkg/profile v1.2.1
diff --git a/vendor/github.com/pierrec/lz4/go.sum b/vendor/github.com/pierrec/lz4/go.sum
new file mode 100644
index 0000000..6ca7598
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/go.sum
@@ -0,0 +1,2 @@
+github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000..850a6fd
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,222 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/XXH/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime32_1 uint32 = 2654435761
+	prime32_2 uint32 = 2246822519
+	prime32_3 uint32 = 3266489917
+	prime32_4 uint32 = 668265263
+	prime32_5 uint32 = 374761393
+
+	prime32_1plus2 uint32 = 606290984
+	prime32_minus1 uint32 = 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v1       uint32
+	v2       uint32
+	v3       uint32
+	v4       uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v1 = prime32_1plus2
+	xxh.v2 = prime32_2
+	xxh.v3 = 0
+	xxh.v4 = prime32_minus1
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+	if xxh.totalLen == 0 {
+		xxh.Reset()
+	}
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	p := 0
+	// Causes compiler to work directly from registers instead of stack:
+	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
+	if m > 0 {
+		// some data left from previous update
+		copy(xxh.buf[xxh.bufused:], input[:r])
+		xxh.bufused += len(input) - r
+
+		// fast rotl(13)
+		buf := xxh.buf[:16] // BCE hint.
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime32_2) * prime32_1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime32_2) * prime32_1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime32_2) * prime32_1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime32_2) * prime32_1
+		p = r
+		xxh.bufused = 0
+	}
+
+	for n := n - 16; p <= n; p += 16 {
+		sub := input[p:][:16] // BCE hint for compiler.
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1
+	}
+	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
+
+	copy(xxh.buf[xxh.bufused:], input[p:])
+	xxh.bufused += len(input) - p
+
+	return n, nil
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if h32 >= 16 {
+		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
+	} else {
+		h32 += prime32_5
+	}
+
+	p := 0
+	n := xxh.bufused
+	buf := xxh.buf
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime32_3
+		h32 = rol17(h32) * prime32_4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(buf[p]) * prime32_5
+		h32 = rol11(h32) * prime32_1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime32_2
+	h32 ^= h32 >> 13
+	h32 *= prime32_3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// ChecksumZero returns the 32-bit hash value.
+func ChecksumZero(input []byte) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += prime32_5
+	} else {
+		v1 := prime32_1plus2
+		v2 := prime32_2
+		v3 := uint32(0)
+		v4 := prime32_minus1
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] // BCE hint for compiler.
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime32_3
+		h32 = rol17(h32) * prime32_4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime32_5
+		h32 = rol11(h32) * prime32_1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime32_2
+	h32 ^= h32 >> 13
+	h32 *= prime32_3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// Uint32Zero hashes x with seed 0.
+func Uint32Zero(x uint32) uint32 {
+	h := prime32_5 + 4 + x*prime32_3
+	h = rol17(h) * prime32_4
+	h ^= h >> 15
+	h *= prime32_2
+	h ^= h >> 13
+	h *= prime32_3
+	h ^= h >> 16
+	return h
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
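+
+// Sketch: the one-shot and streaming forms agree for the same input
+// (the literal below is an arbitrary example):
+//
+//	oneShot := ChecksumZero([]byte("hello"))
+//	var h XXHZero
+//	h.Write([]byte("he"))
+//	h.Write([]byte("llo"))
+//	streamed := h.Sum32() // streamed == oneShot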
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 0000000..3580275
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,68 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+//
+package lz4
+
+const (
+	// Extension is the LZ4 frame file name extension
+	Extension = ".lz4"
+	// Version is the LZ4 frame format version
+	Version = 1
+
+	frameMagic     uint32 = 0x184D2204
+	frameSkipMagic uint32 = 0x184D2A50
+
+	// The following constants are used to setup the compression algorithm.
+	minMatch            = 4  // the minimum size of a match sequence (4 bytes)
+	winSizeLog          = 16 // LZ4 64Kb window size limit
+	winSize             = 1 << winSizeLog
+	winMask             = winSize - 1 // 64Kb window of previous data for dependent blocks
+	compressedBlockFlag = 1 << 31
+	compressedBlockMask = compressedBlockFlag - 1
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise.
+	hashLog       = 16
+	hashTableSize = 1 << hashLog
+	hashShift     = uint((minMatch * 8) - hashLog)
+
+	mfLimit      = 8 + minMatch // The last match cannot start within the last 12 bytes.
+	skipStrength = 6            // variable step for fast scan
+)
+
+// bsMapID maps the block max size ID to its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+var (
+	bsMapID    = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
+	bsMapValue = make(map[int]byte, len(bsMapID))
+)
+
+// Build the reverse mapping (size in bytes to ID).
+func init() {
+	for i, v := range bsMapID {
+		bsMapValue[v] = i
+	}
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition
+// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary.
+type Header struct {
+	BlockChecksum    bool   // Compressed blocks checksum flag.
+	NoChecksum       bool   // Frame checksum flag.
+	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
+	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
+	done             bool   // Header processed flag (Read or Write and checked).
+}
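+
+// Sketch of a hypothetical configuration (dst is a placeholder io.Writer):
+// the zero value works as-is, and individual fields may be set before the
+// first Write, e.g.
+//
+//	zw := NewWriter(dst)
+//	zw.Header = Header{BlockMaxSize: 64 << 10, BlockChecksum: true}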
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
new file mode 100644
index 0000000..9a0fb00
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
@@ -0,0 +1,29 @@
+//+build go1.10
+
+package lz4
+
+import (
+	"fmt"
+	"strings"
+)
+
+func (h Header) String() string {
+	var s strings.Builder
+
+	s.WriteString(fmt.Sprintf("%T{", h))
+	if h.BlockChecksum {
+		s.WriteString("BlockChecksum: true ")
+	}
+	if h.NoChecksum {
+		s.WriteString("NoChecksum: true ")
+	}
+	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+	}
+	if l := h.CompressionLevel; l != 0 {
+		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+	}
+	s.WriteByte('}')
+
+	return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
new file mode 100644
index 0000000..12c761a
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
@@ -0,0 +1,29 @@
+//+build !go1.10
+
+package lz4
+
+import (
+	"bytes"
+	"fmt"
+)
+
+func (h Header) String() string {
+	var s bytes.Buffer
+
+	s.WriteString(fmt.Sprintf("%T{", h))
+	if h.BlockChecksum {
+		s.WriteString("BlockChecksum: true ")
+	}
+	if h.NoChecksum {
+		s.WriteString("NoChecksum: true ")
+	}
+	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
+		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
+	}
+	if l := h.CompressionLevel; l != 0 {
+		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
+	}
+	s.WriteByte('}')
+
+	return s.String()
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 0000000..f08db47
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,295 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/pierrec/lz4/internal/xxh32"
+)
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+	Header
+
+	buf      [8]byte       // Scrap buffer.
+	pos      int64         // Current position in src.
+	src      io.Reader     // Source.
+	zdata    []byte        // Compressed data.
+	data     []byte        // Uncompressed data.
+	idx      int           // Index of unread bytes into data.
+	checksum xxh32.XXHZero // Frame hash.
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+	r := &Reader{src: src}
+	return r
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+	defer z.checksum.Reset()
+
+	buf := z.buf[:]
+	for {
+		magic, err := z.readUint32()
+		if err != nil {
+			z.pos += 4
+			if !first && err == io.ErrUnexpectedEOF {
+				return io.EOF
+			}
+			return err
+		}
+		if magic == frameMagic {
+			break
+		}
+		if magic>>8 != frameSkipMagic>>8 {
+			return ErrInvalid
+		}
+		skipSize, err := z.readUint32()
+		if err != nil {
+			return err
+		}
+		z.pos += 4
+		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+		if err != nil {
+			return err
+		}
+		z.pos += m
+	}
+
+	// Header.
+	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+		return err
+	}
+	z.pos += 8
+
+	b := buf[0]
+	if v := b >> 6; v != Version {
+		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
+	}
+	if b>>5&1 == 0 {
+		return fmt.Errorf("lz4: block dependency not supported")
+	}
+	z.BlockChecksum = b>>4&1 > 0
+	frameSize := b>>3&1 > 0
+	z.NoChecksum = b>>2&1 == 0
+
+	bmsID := buf[1] >> 4 & 0x7
+	bSize, ok := bsMapID[bmsID]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
+	}
+	z.BlockMaxSize = bSize
+
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	if debugFlag {
+		debug("header block max size id=%d size=%d", bmsID, bSize)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = len(z.data)
+
+	z.checksum.Write(buf[0:2])
+
+	if frameSize {
+		buf := buf[:8]
+		if _, err := io.ReadFull(z.src, buf); err != nil {
+			return err
+		}
+		z.Size = binary.LittleEndian.Uint64(buf)
+		z.pos += 8
+		z.checksum.Write(buf)
+	}
+
+	// Header checksum.
+	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+		return err
+	}
+	z.pos++
+	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
+	}
+
+	z.Header.done = true
+	if debugFlag {
+		debug("header read: %v", z.Header)
+	}
+
+	return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+func (z *Reader) Read(buf []byte) (int, error) {
+	if debugFlag {
+		debug("Read buf len=%d", len(buf))
+	}
+	if !z.Header.done {
+		if err := z.readHeader(true); err != nil {
+			return 0, err
+		}
+		if debugFlag {
+			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
+				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
+		}
+	}
+
+	if len(buf) == 0 {
+		return 0, nil
+	}
+
+	if z.idx == len(z.data) {
+		// No data ready for reading, process the next block.
+		if debugFlag {
+			debug("reading block from writer")
+		}
+		// Block length: 0 = end of frame, highest bit set: uncompressed.
+		bLen, err := z.readUint32()
+		if err != nil {
+			return 0, err
+		}
+		z.pos += 4
+
+		if bLen == 0 {
+			// End of frame reached.
+			if !z.NoChecksum {
+				// Validate the frame checksum.
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				if debugFlag {
+					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
+				}
+				z.pos += 4
+				if h := z.checksum.Sum32(); checksum != h {
+					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+			// Get ready for the next concatenated frame and keep the position.
+			pos := z.pos
+			z.Reset(z.src)
+			z.pos = pos
+
+			// Since multiple frames can be concatenated, check for more.
+			return 0, z.readHeader(false)
+		}
+
+		if debugFlag {
+			debug("raw block size %d", bLen)
+		}
+		if bLen&compressedBlockFlag > 0 {
+			// Uncompressed block.
+			bLen &= compressedBlockMask
+			if debugFlag {
+				debug("uncompressed block size %d", bLen)
+			}
+			if int(bLen) > cap(z.data) {
+				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+			}
+			z.data = z.data[:bLen]
+			if _, err := io.ReadFull(z.src, z.data); err != nil {
+				return 0, err
+			}
+			z.pos += int64(bLen)
+
+			if z.BlockChecksum {
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				z.pos += 4
+
+				if h := xxh32.ChecksumZero(z.data); h != checksum {
+					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+		} else {
+			// Compressed block.
+			if debugFlag {
+				debug("compressed block size %d", bLen)
+			}
+			if int(bLen) > cap(z.data) {
+				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
+			}
+			zdata := z.zdata[:bLen]
+			if _, err := io.ReadFull(z.src, zdata); err != nil {
+				return 0, err
+			}
+			z.pos += int64(bLen)
+
+			if z.BlockChecksum {
+				checksum, err := z.readUint32()
+				if err != nil {
+					return 0, err
+				}
+				z.pos += 4
+
+				if h := xxh32.ChecksumZero(zdata); h != checksum {
+					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
+				}
+			}
+
+			n, err := UncompressBlock(zdata, z.data)
+			if err != nil {
+				return 0, err
+			}
+			z.data = z.data[:n]
+		}
+
+		if !z.NoChecksum {
+			z.checksum.Write(z.data)
+			if debugFlag {
+				debug("current frame checksum %x", z.checksum.Sum32())
+			}
+		}
+		z.idx = 0
+	}
+
+	n := copy(buf, z.data[z.idx:])
+	z.idx += n
+	if debugFlag {
+		debug("copied %d bytes to input", n)
+	}
+
+	return n, nil
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.pos = 0
+	z.src = r
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+	z.checksum.Reset()
+}
+
+// readUint32 reads an uint32 into the supplied buffer.
+// The idea is to make use of the already allocated buffers avoiding additional allocations.
+func (z *Reader) readUint32() (uint32, error) {
+	buf := z.buf[:4]
+	_, err := io.ReadFull(z.src, buf)
+	x := binary.LittleEndian.Uint32(buf)
+	return x, err
+}
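+
+// Decompressing a frame is a plain io.Copy; sketch, with src and dst as
+// placeholder io.Reader/io.Writer values:
+//
+//	zr := NewReader(src)
+//	if _, err := io.Copy(dst, zr); err != nil {
+//		// handle the error
+//	}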
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 0000000..0120438
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,267 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"github.com/pierrec/lz4/internal/xxh32"
+)
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+	Header
+
+	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+	dst       io.Writer     // Destination.
+	checksum  xxh32.XXHZero // Frame checksum.
+	zdata     []byte        // Compressed data.
+	data      []byte        // Data to be compressed.
+	idx       int           // Index into data.
+	hashtable [winSize]int  // Hash table used in CompressBlock().
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It may be changed before the first Write, but not afterwards until Reset() is called.
+func NewWriter(dst io.Writer) *Writer {
+	return &Writer{dst: dst}
+}
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+	// Default to 4Mb if BlockMaxSize is not set.
+	if z.Header.BlockMaxSize == 0 {
+		z.Header.BlockMaxSize = bsMapID[7]
+	}
+	// The only option that needs to be validated.
+	bSize := z.Header.BlockMaxSize
+	bSizeID, ok := bsMapValue[bSize]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
+	}
+	// Allocate the compressed/uncompressed buffers.
+	// The compressed buffer cannot exceed the uncompressed one.
+	if n := 2 * bSize; cap(z.zdata) < n {
+		z.zdata = make([]byte, n, n)
+	}
+	z.zdata = z.zdata[:bSize]
+	z.data = z.zdata[:cap(z.zdata)][bSize:]
+	z.idx = 0
+
+	// Size is optional.
+	buf := z.buf[:]
+
+	// Set the fixed size data: magic number, block max size and flags.
+	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+	flg := byte(Version << 6)
+	flg |= 1 << 5 // No block dependency.
+	if z.Header.BlockChecksum {
+		flg |= 1 << 4
+	}
+	if z.Header.Size > 0 {
+		flg |= 1 << 3
+	}
+	if !z.Header.NoChecksum {
+		flg |= 1 << 2
+	}
+	buf[4] = flg
+	buf[5] = bSizeID << 4
+
+	// Current buffer size: magic(4) + flags(1) + block max size (1).
+	n := 6
+	// Optional items.
+	if z.Header.Size > 0 {
+		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+		n += 8
+	}
+
+	// The header checksum includes the flags, block max size and optional Size.
+	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
+	z.checksum.Reset()
+
+	// Header ready, write it out.
+	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+		return err
+	}
+	z.Header.done = true
+	if debugFlag {
+		debug("wrote header %v", z.Header)
+	}
+
+	return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+func (z *Writer) Write(buf []byte) (int, error) {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return 0, err
+		}
+	}
+	if debugFlag {
+		debug("input buffer len=%d index=%d", len(buf), z.idx)
+	}
+
+	zn := len(z.data)
+	var n int
+	for len(buf) > 0 {
+		if z.idx == 0 && len(buf) >= zn {
+			// Avoid a copy as there is enough data for a block.
+			if err := z.compressBlock(buf[:zn]); err != nil {
+				return n, err
+			}
+			n += zn
+			buf = buf[zn:]
+			continue
+		}
+		// Accumulate the data to be compressed.
+		m := copy(z.data[z.idx:], buf)
+		n += m
+		z.idx += m
+		buf = buf[m:]
+		if debugFlag {
+			debug("%d bytes copied to buf, current index %d", n, z.idx)
+		}
+
+		if z.idx < len(z.data) {
+			// Buffer not filled.
+			if debugFlag {
+				debug("need more data for compression")
+			}
+			return n, nil
+		}
+
+		// Buffer full.
+		if err := z.compressBlock(z.data); err != nil {
+			return n, err
+		}
+		z.idx = 0
+	}
+
+	return n, nil
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(data []byte) error {
+	if !z.NoChecksum {
+		z.checksum.Write(data)
+	}
+
+	// The compressed block size cannot exceed the input's.
+	var zn int
+	var err error
+
+	if level := z.Header.CompressionLevel; level != 0 {
+		zn, err = CompressBlockHC(data, z.zdata, level)
+	} else {
+		zn, err = CompressBlock(data, z.zdata, z.hashtable[:])
+	}
+
+	var zdata []byte
+	var bLen uint32
+	if debugFlag {
+		debug("block compression %d => %d", len(data), zn)
+	}
+	if err == nil && zn > 0 && zn < len(data) {
+		// Compressible and compressed size smaller than uncompressed: ok!
+		bLen = uint32(zn)
+		zdata = z.zdata[:zn]
+	} else {
+		// Uncompressed block.
+		bLen = uint32(len(data)) | compressedBlockFlag
+		zdata = data
+	}
+	if debugFlag {
+		debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
+	}
+
+	// Write the block.
+	if err := z.writeUint32(bLen); err != nil {
+		return err
+	}
+	if _, err := z.dst.Write(zdata); err != nil {
+		return err
+	}
+
+	if z.BlockChecksum {
+		checksum := xxh32.ChecksumZero(zdata)
+		if debugFlag {
+			debug("block checksum %x", checksum)
+		}
+		if err := z.writeUint32(checksum); err != nil {
+			return err
+		}
+	}
+	if debugFlag {
+		debug("current frame checksum %x", z.checksum.Sum32())
+	}
+
+	return nil
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+func (z *Writer) Flush() error {
+	if debugFlag {
+		debug("flush with index %d", z.idx)
+	}
+	if z.idx == 0 {
+		return nil
+	}
+
+	return z.compressBlock(z.data[:z.idx])
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if !z.Header.done {
+		if err := z.writeHeader(); err != nil {
+			return err
+		}
+	}
+
+	if err := z.Flush(); err != nil {
+		return err
+	}
+
+	if debugFlag {
+		debug("writing last empty block")
+	}
+	if err := z.writeUint32(0); err != nil {
+		return err
+	}
+	if !z.NoChecksum {
+		checksum := z.checksum.Sum32()
+		if debugFlag {
+			debug("stream checksum %x", checksum)
+		}
+		if err := z.writeUint32(checksum); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Reset clears the state of the Writer z such that it is equivalent to its
+// initial state from NewWriter, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *Writer) Reset(w io.Writer) {
+	z.Header = Header{}
+	z.dst = w
+	z.checksum.Reset()
+	z.zdata = z.zdata[:0]
+	z.data = z.data[:0]
+	z.idx = 0
+}
+
+// writeUint32 writes a uint32 to the underlying writer.
+func (z *Writer) writeUint32(x uint32) error {
+	buf := z.buf[:4]
+	binary.LittleEndian.PutUint32(buf, x)
+	_, err := z.dst.Write(buf)
+	return err
+}
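+
+// Round-trip sketch (in and out are placeholder io.Reader/io.Writer values):
+//
+//	zw := NewWriter(out)
+//	if _, err := io.Copy(zw, in); err != nil {
+//		// handle the error
+//	}
+//	err := zw.Close() // flushes, then writes the end mark and frame checksum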
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 0000000..c67dad6
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 0000000..003e99f
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of the Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable for outputting text differences in a human-friendly way;
+// there is no guarantee that generated diffs are consumable by patch(1).
+package difflib
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool) *SequenceMatcher {
+
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//     alo <= i <= i+k <= ahi
+//     blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+//     k >= k'
+//     i <= i'
+//     and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize += 1
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+				j1, min(j2, j1+n)})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = max(i1, i2-n), max(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s] = m.fullBCount[s] + 1
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches += 1
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+	la, lb := len(m.a), len(m.b)
+	return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+	A        []string // First sequence lines
+	FromFile string   // First file name
+	FromDate string   // First file time
+	B        []string // Second sequence lines
+	ToFile   string   // Second file name
+	ToDate   string   // Second file time
+	Eol      string   // Headers end of line, defaults to LF
+	Context  int      // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context.  The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline.  This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times.  Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 0 {
+		beginning -= 1 // empty ranges begin at line just before the range
+	}
+	if length <= 1 {
+		return fmt.Sprintf("%d", beginning)
+	}
+	return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times.  Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	var diffErr error
+	wf := func(format string, args ...interface{}) {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+	ws := func(s string) {
+		_, err := buf.WriteString(s)
+		if diffErr == nil && err != nil {
+			diffErr = err
+		}
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	prefix := map[byte]string{
+		'i': "+ ",
+		'd': "- ",
+		'r': "! ",
+		'e': "  ",
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+			}
+		}
+
+		first, last := g[0], g[len(g)-1]
+		ws("***************" + diff.Eol)
+
+		range1 := formatRangeContext(first.I1, last.I2)
+		wf("*** %s ****%s", range1, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, cc := range g {
+					if cc.Tag == 'i' {
+						continue
+					}
+					for _, line := range diff.A[cc.I1:cc.I2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+
+		range2 := formatRangeContext(first.J1, last.J2)
+		wf("--- %s ----%s", range2, diff.Eol)
+		for _, c := range g {
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, cc := range g {
+					if cc.Tag == 'd' {
+						continue
+					}
+					for _, line := range diff.B[cc.J1:cc.J2] {
+						ws(prefix[cc.Tag] + line)
+					}
+				}
+				break
+			}
+		}
+	}
+	return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteContextDiff(w, diff)
+	return string(w.Bytes()), err
+}
+
+// Split a string on "\n", preserving the newline at the end of each line.
+// The output can be used as input for UnifiedDiff and ContextDiff structures.
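+// For example, SplitLines("one\ntwo") returns []string{"one\n", "two\n"}.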
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 0000000..83c8f82
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 0000000..117763e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+    - 1.3
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
+
+script:
+    - ./validate.sh
+
+# this should give us faster builds according to 
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 0000000..363fa9e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    1.  Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+
+    2.  Redistributions in binary form must reproduce the above
+        copyright notice, this list of conditions and the following
+        disclaimer in the documentation and/or other materials provided
+        with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 0000000..b7356b5
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,168 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := metrics.NewRegistry()
+fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not thread-safe. For thread-safe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers, otherwise they will
+leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+  10e9,
+  "127.0.0.1:8086",
+  "database-name",
+  "username",
+  "password",
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+    10e9,                  // interval
+    "example@example.com", // account owner email address
+    "token",               // Librato API token
+    "hostname",            // source
+    []float64{0.95},       // percentiles to send
+    time.Millisecond,      // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but is exposed under `/debug/metrics`, which shows a JSON representation of all
+your usual expvars as well as all your go-metrics.
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
+* Honeycomb - https://github.com/getspine/go-metrics-honeycomb
+* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 0000000..bb7b039
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+	Clear()
+	Count() int64
+	Dec(int64)
+	Inc(int64)
+	Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
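+//
+// An illustrative call site (the metric name is arbitrary; passing nil
+// selects the DefaultRegistry):
+//
+//	c := metrics.GetOrRegisterCounter("requests.total", nil)
+//	c.Inc(1)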
+func GetOrRegisterCounter(name string, r Registry) Counter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+	if UseNilMetrics {
+		return NilCounter{}
+	}
+	return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+	c := NewCounter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+	panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+	panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+	panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+	count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+	atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+	return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+	atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+	atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+	return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 0000000..043ccef
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+	"runtime/debug"
+	"time"
+)
+
+var (
+	debugMetrics struct {
+		GCStats struct {
+			LastGC Gauge
+			NumGC  Gauge
+			Pause  Histogram
+			//PauseQuantiles Histogram
+			PauseTotal Gauge
+		}
+		ReadGCStats Timer
+	}
+	gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats.  This is designed to be called as a goroutine.
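+//
+// A typical wiring, sketched here for illustration, registers the metrics
+// first and then captures in the background:
+//
+//	metrics.RegisterDebugGCStats(metrics.DefaultRegistry)
+//	go metrics.CaptureDebugGCStats(metrics.DefaultRegistry, 5*time.Second)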
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+	for _ = range time.Tick(d) {
+		CaptureDebugGCStatsOnce(r)
+	}
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats.  This is designed to be called in a background goroutine.
+// Calling it with a registry that has not been passed to RegisterDebugGCStats
+// will panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+	lastGC := gcStats.LastGC
+	t := time.Now()
+	debug.ReadGCStats(&gcStats)
+	debugMetrics.ReadGCStats.UpdateSince(t)
+
+	debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+	debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+		debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+	}
+	//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+	debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats.  The metrics are named by their fully-qualified Go symbols,
+// i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+	debugMetrics.GCStats.LastGC = NewGauge()
+	debugMetrics.GCStats.NumGC = NewGauge()
+	debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+	//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+	debugMetrics.GCStats.PauseTotal = NewGauge()
+	debugMetrics.ReadGCStats = NewTimer()
+
+	r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+	r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+	r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+	//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+	r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+	r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+	gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 0000000..a8183dd
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,138 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+	Rate() float64
+	Snapshot() EWMA
+	Tick()
+	Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+	if UseNilMetrics {
+		return NilEWMA{}
+	}
+	return &StandardEWMA{alpha: alpha}
+}
+
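+// The one-, five-, and fifteen-minute constructors below use the standard
+// EWMA smoothing constant alpha = 1 - e^(-tickInterval/window): Tick is
+// assumed to fire every five seconds, so a one-minute window gives
+// 1 - e^(-5/60/1), a five-minute window 1 - e^(-5/60/5), and a
+// fifteen-minute window 1 - e^(-5/60/15).
+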
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+	panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+	panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick.  It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+	alpha     float64
+	rate      uint64
+	init      uint32
+	mutex     sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+	return currentRate
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average.  It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+	// Optimization to avoid mutex locking in the hot-path.
+	if atomic.LoadUint32(&a.init) == 1 {
+		a.updateRate(a.fetchInstantRate())
+	} else {
+		// Slow-path: this is only needed on the first Tick() and preserves transactional updating
+		// of init and rate in the else block. The first conditional is needed below because
+		// a different thread could have set a.init = 1 between the time of the first atomic load and when
+		// the lock was acquired.
+		a.mutex.Lock()
+		if atomic.LoadUint32(&a.init) == 1 {
+			// The fetchInstantRate() call uses atomic loads, which is unnecessary in this critical section
+			// but again, this section is only invoked on the first successful Tick() operation.
+			a.updateRate(a.fetchInstantRate())
+		} else {
+			atomic.StoreUint32(&a.init, 1)
+			atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+		}
+		a.mutex.Unlock()
+	}
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+	count := atomic.LoadInt64(&a.uncounted)
+	atomic.AddInt64(&a.uncounted, -count)
+	instantRate := float64(count) / float64(5e9)
+	return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+	currentRate += a.alpha * (instantRate - currentRate)
+	atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+	atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 0000000..cb57a93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+	Snapshot() Gauge
+	Update(int64)
+	Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+	c := NewGauge()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new StandardGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+	c := NewFunctionalGauge(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+	value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+	panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 0000000..3962e6d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+	"math"
+	"sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+	Snapshot() GaugeFloat64
+	Update(float64)
+	Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &StandardGaugeFloat64{
+		value: 0.0,
+	}
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	c := NewGaugeFloat64()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+	c := NewFunctionalGaugeFloat64(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+	panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and
+// uses the sync/atomic package to manage the bits of a single float64 value.
+type StandardGaugeFloat64 struct {
+	value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+	return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+	value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+	panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 0000000..abd0a7d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+	Percentiles   []float64     // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	GraphiteWithConfig(GraphiteConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+		Percentiles:   []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+	})
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	for _ = range time.Tick(c.FlushInterval) {
+		if err := graphite(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	return graphite(&c)
+}
+
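+// graphite performs a single flush: it dials the configured address and
+// writes one Graphite plaintext-protocol line per exported field, in the form
+// "<prefix>.<name>.<field> <value> <unix-timestamp>". For example, a Counter
+// registered as "requests" under prefix "app" produces a line like
+// "app.requests.count 47 1700000000" (timestamp illustrative).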
+func graphite(c *GraphiteConfig) error {
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+		case Gauge:
+			fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+		}
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 0000000..445131c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+	Check()
+	Error() error
+	Healthy()
+	Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
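+//
+// A minimal sketch (checkDatabase is a hypothetical probe):
+//
+//	h := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
+//		if err := checkDatabase(); err != nil {
+//			h.Unhealthy(err)
+//		} else {
+//			h.Healthy()
+//		}
+//	})
+//	h.Check() // runs the probe and updates h.Error()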
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+	if UseNilMetrics {
+		return NilHealthcheck{}
+	}
+	return &StandardHealthcheck{nil, f}
+}
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+	err error
+	f   func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+	h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+	return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+	h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy.  The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+	h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 0000000..dbc837f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Sample() Sample
+	Snapshot() Histogram
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
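+//
+// An illustrative call site, using the exponentially-decaying sample this
+// library provides:
+//
+//	s := metrics.NewExpDecaySample(1028, 0.015)
+//	h := metrics.GetOrRegisterHistogram("request.size", nil, s)
+//	h.Update(512)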
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+	if UseNilMetrics {
+		return NilHistogram{}
+	}
+	return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+	c := NewHistogram(s)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+	sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+	panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+	panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+	sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 0000000..174b947
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(r.GetAll())
+}
+
+// WriteJSON periodically writes metrics from the given registry to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+	for _ = range time.Tick(d) {
+		WriteJSONOnce(r, w)
+	}
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+	json.NewEncoder(w).Encode(r)
+}
+
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.GetAll())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 0000000..f8074c0
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+	"time"
+)
+
+type Logger interface {
+	Printf(format string, v ...interface{})
+}
+
+func Log(r Registry, freq time.Duration, l Logger) {
+	LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// Output each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (e.g. time.Millisecond) rather than nanoseconds.
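+//
+// For example (illustrative), LogScaled(metrics.DefaultRegistry, time.Minute,
+// time.Millisecond, logger) prints a report once per minute with timer values
+// scaled to milliseconds.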
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+	du := float64(scale)
+	duSuffix := scale.String()[1:]
+
+	for _ = range time.Tick(freq) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				l.Printf("counter %s\n", name)
+				l.Printf("  count:       %9d\n", metric.Count())
+			case Gauge:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %9d\n", metric.Value())
+			case GaugeFloat64:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %f\n", metric.Value())
+			case Healthcheck:
+				metric.Check()
+				l.Printf("healthcheck %s\n", name)
+				l.Printf("  error:       %v\n", metric.Error())
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("histogram %s\n", name)
+				l.Printf("  count:       %9d\n", h.Count())
+				l.Printf("  min:         %9d\n", h.Min())
+				l.Printf("  max:         %9d\n", h.Max())
+				l.Printf("  mean:        %12.2f\n", h.Mean())
+				l.Printf("  stddev:      %12.2f\n", h.StdDev())
+				l.Printf("  median:      %12.2f\n", ps[0])
+				l.Printf("  75%%:         %12.2f\n", ps[1])
+				l.Printf("  95%%:         %12.2f\n", ps[2])
+				l.Printf("  99%%:         %12.2f\n", ps[3])
+				l.Printf("  99.9%%:       %12.2f\n", ps[4])
+			case Meter:
+				m := metric.Snapshot()
+				l.Printf("meter %s\n", name)
+				l.Printf("  count:       %9d\n", m.Count())
+				l.Printf("  1-min rate:  %12.2f\n", m.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", m.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", m.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", m.RateMean())
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("timer %s\n", name)
+				l.Printf("  count:       %9d\n", t.Count())
+				l.Printf("  min:         %12.2f%s\n", float64(t.Min())/du, duSuffix)
+				l.Printf("  max:         %12.2f%s\n", float64(t.Max())/du, duSuffix)
+				l.Printf("  mean:        %12.2f%s\n", t.Mean()/du, duSuffix)
+				l.Printf("  stddev:      %12.2f%s\n", t.StdDev()/du, duSuffix)
+				l.Printf("  median:      %12.2f%s\n", ps[0]/du, duSuffix)
+				l.Printf("  75%%:         %12.2f%s\n", ps[1]/du, duSuffix)
+				l.Printf("  95%%:         %12.2f%s\n", ps[2]/du, duSuffix)
+				l.Printf("  99%%:         %12.2f%s\n", ps[3]/du, duSuffix)
+				l.Printf("  99.9%%:       %12.2f%s\n", ps[4]/du, duSuffix)
+				l.Printf("  1-min rate:  %12.2f\n", t.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", t.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", t.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", t.RateMean())
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 0000000..47454f5
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+	time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak:    42604 kB
+VmSize:    42604 kB
+VmLck:         0 kB
+VmHWM:      1120 kB
+VmRSS:      1120 kB
+VmData:    35460 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        36 kB
+VmSwap:        0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+	"fmt"
+	"metrics"
+	"time"
+)
+
+func main() {
+	fmt.Sprintf("foo")
+	metrics.NewRegistry()
+	time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak:    44016 kB
+VmSize:    44016 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak:    55024 kB
+VmSize:    55024 kB
+VmLck:         0 kB
+VmHWM:     12440 kB
+VmRSS:     12440 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak:    44012 kB
+VmSize:    44012 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak:    55020 kB
+VmSize:    55020 kB
+VmLck:         0 kB
+VmHWM:     12432 kB
+VmRSS:     12432 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        60 kB
+VmSwap:        0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak:    72272 kB
+VmSize:    72272 kB
+VmLck:         0 kB
+VmHWM:     16204 kB
+VmRSS:     16204 kB
+VmData:    65100 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        80 kB
+VmSwap:        0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   256912 kB
+VmSize:   256912 kB
+VmLck:         0 kB
+VmHWM:    146204 kB
+VmRSS:    146204 kB
+VmData:   249740 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       448 kB
+VmSwap:        0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   908112 kB
+VmSize:   908112 kB
+VmLck:         0 kB
+VmHWM:    645832 kB
+VmRSS:    645588 kB
+VmData:   900940 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1716 kB
+VmSwap:     1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak:    62480 kB
+VmSize:    62480 kB
+VmLck:         0 kB
+VmHWM:     11572 kB
+VmRSS:     11572 kB
+VmData:    55308 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   153296 kB
+VmSize:   153296 kB
+VmLck:         0 kB
+VmHWM:    101176 kB
+VmRSS:    101176 kB
+VmData:   146124 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       240 kB
+VmSwap:        0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   557264 kB
+VmSize:   557264 kB
+VmLck:         0 kB
+VmHWM:    501056 kB
+VmRSS:    501056 kB
+VmData:   550092 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1032 kB
+VmSwap:        0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak:    74504 kB
+VmSize:    74504 kB
+VmLck:         0 kB
+VmHWM:     24124 kB
+VmRSS:     24124 kB
+VmData:    67340 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:        92 kB
+VmSwap:        0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak:   278920 kB
+VmSize:   278920 kB
+VmLck:         0 kB
+VmHWM:    227300 kB
+VmRSS:    227300 kB
+VmData:   271756 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:       488 kB
+VmSwap:        0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 0000000..223669b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,251 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+	Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
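+//
+// An illustrative call site (the name is arbitrary; passing nil selects the
+// DefaultRegistry):
+//
+//	m := metrics.GetOrRegisterMeter("requests", nil)
+//	m.Mark(1)
+//	// later, when the meter is no longer needed:
+//	metrics.Unregister("requests")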
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters[m] = struct{}{}
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and launches a
+// goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+	stopped     uint32
+}
+
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Stop stops the meter; Mark() is a no-op on a stopped meter.
+func (m *StandardMeter) Stop() {
+	if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	if atomic.LoadUint32(&m.stopped) == 1 {
+		return
+	}
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+func (m *StandardMeter) tick() {
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can be stopped later.
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  map[*StandardMeter]struct{}
+	ticker  *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+	for {
+		select {
+		case <-ma.ticker.C:
+			ma.tickMeters()
+		}
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 0000000..b97a49e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics.  If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 0000000..266b6c9
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+	})
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+	for range time.Tick(c.FlushInterval) {
+		if err := openTSDB(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
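+
+// Example (illustrative sketch, not part of the upstream source): both
+// exporter functions block on their flush ticker, so they are normally
+// started in a dedicated goroutine. The host below is hypothetical.
+//
+//	addr, _ := net.ResolveTCPAddr("tcp", "tsdb.example.com:4242")
+//	go OpenTSDB(DefaultRegistry, 10*time.Second, "myapp", addr)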
+
+func getShortHostname() string {
+	if shortHostName == "" {
+		host, _ := os.Hostname()
+		if index := strings.Index(host, "."); index > 0 {
+			shortHostName = host[:index]
+		} else {
+			shortHostName = host
+		}
+	}
+	return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+	shortHostname := getShortHostname()
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+		case Gauge:
+			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+		}
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 0000000..b3bab64
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,363 @@
+package metrics
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists.  If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+	return fmt.Sprintf("duplicate metric: %s", string(err))
+}
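+
+// Example (illustrative sketch, not part of the upstream source): callers
+// can detect the collision with a type assertion on the returned error.
+//
+//	if err := Register("requests", NewCounter()); err != nil {
+//		if _, ok := err.(DuplicateMetric); ok {
+//			// a metric named "requests" already exists; Unregister it to replace it
+//		}
+//	}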
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+	// Call the given function for each registered metric.
+	Each(func(string, interface{}))
+
+	// Get the metric by the given name or nil if none is registered.
+	Get(string) interface{}
+
+	// GetAll metrics in the Registry.
+	GetAll() map[string]map[string]interface{}
+
+	// Gets an existing metric or registers the given one.
+	// The interface can be the metric to register if not found in registry,
+	// or a function returning the metric for lazy instantiation.
+	GetOrRegister(string, interface{}) interface{}
+
+	// Register the given metric under the given name.
+	Register(string, interface{}) error
+
+	// Run all registered healthchecks.
+	RunHealthchecks()
+
+	// Unregister the metric with the given name.
+	Unregister(string)
+
+	// Unregister all metrics.  (Mostly for testing.)
+	UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+	metrics map[string]interface{}
+	mutex   sync.RWMutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+	return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+	for name, i := range r.registered() {
+		f(name, i)
+	}
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+	// fast path: take the read lock first, since most lookups hit an existing metric
+	r.mutex.RLock()
+	metric, ok := r.metrics[name]
+	r.mutex.RUnlock()
+	if ok {
+		return metric
+	}
+
+	// only take the write lock if we'll be modifying the metrics map
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if metric, ok := r.metrics[name]; ok {
+		return metric
+	}
+	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+		i = v.Call(nil)[0].Interface()
+	}
+	r.register(name, i)
+	return i
+}
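+
+// Example (illustrative sketch, not part of the upstream source): passing
+// the constructor itself (a func value) defers allocation until the name
+// is actually missing from the registry.
+//
+//	r := NewRegistry()
+//	c := r.GetOrRegister("queue.depth", NewCounter).(Counter) // NewCounter only runs on a miss
+//	c.Inc(1)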
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	for _, i := range r.metrics {
+		if h, ok := i.(Healthcheck); ok {
+			h.Check()
+		}
+	}
+}
+
+// GetAll metrics in the Registry
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+	data := make(map[string]map[string]interface{})
+	r.Each(func(name string, i interface{}) {
+		values := make(map[string]interface{})
+		switch metric := i.(type) {
+		case Counter:
+			values["count"] = metric.Count()
+		case Gauge:
+			values["value"] = metric.Value()
+		case GaugeFloat64:
+			values["value"] = metric.Value()
+		case Healthcheck:
+			values["error"] = nil
+			metric.Check()
+			if err := metric.Error(); nil != err {
+				values["error"] = metric.Error().Error()
+			}
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+		case Meter:
+			m := metric.Snapshot()
+			values["count"] = m.Count()
+			values["1m.rate"] = m.Rate1()
+			values["5m.rate"] = m.Rate5()
+			values["15m.rate"] = m.Rate15()
+			values["mean.rate"] = m.RateMean()
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
+		}
+		data[name] = values
+	})
+	return data
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.stop(name)
+	delete(r.metrics, name)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name := range r.metrics {
+		r.stop(name)
+		delete(r.metrics, name)
+	}
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+	if _, ok := r.metrics[name]; ok {
+		return DuplicateMetric(name)
+	}
+	switch i.(type) {
+	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+		r.metrics[name] = i
+	}
+	return nil
+}
+
+func (r *StandardRegistry) registered() map[string]interface{} {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	metrics := make(map[string]interface{}, len(r.metrics))
+	for name, i := range r.metrics {
+		metrics[name] = i
+	}
+	return metrics
+}
+
+func (r *StandardRegistry) stop(name string) {
+	if i, ok := r.metrics[name]; ok {
+		if s, ok := i.(Stoppable); ok {
+			s.Stop()
+		}
+	}
+}
+
+// Stoppable is implemented by metrics that have to be stopped.
+type Stoppable interface {
+	Stop()
+}
+
+type PrefixedRegistry struct {
+	underlying Registry
+	prefix     string
+}
+
+func NewPrefixedRegistry(prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: NewRegistry(),
+		prefix:     prefix,
+	}
+}
+
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: parent,
+		prefix:     prefix,
+	}
+}
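+
+// Example (illustrative sketch, not part of the upstream source): a child
+// registry shares the parent's storage, so a name registered through the
+// child is stored in the parent with the prefix applied.
+//
+//	parent := NewRegistry()
+//	child := NewPrefixedChildRegistry(parent, "db.")
+//	child.Register("queries", NewCounter()) // stored as "db.queries"
+//	_ = parent.Get("db.queries")            // resolves to the same Counter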
+
+// Call the given function for each registered metric.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+	wrappedFn := func(prefix string) func(string, interface{}) {
+		return func(name string, iface interface{}) {
+			if strings.HasPrefix(name, prefix) {
+				fn(name, iface)
+			}
+		}
+	}
+
+	baseRegistry, prefix := findPrefix(r, "")
+	baseRegistry.Each(wrappedFn(prefix))
+}
+
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+	switch r := registry.(type) {
+	case *PrefixedRegistry:
+		return findPrefix(r.underlying, r.prefix+prefix)
+	case *StandardRegistry:
+		return r, prefix
+	}
+	return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+	realName := r.prefix + name
+	return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	realName := r.prefix + name
+	return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+	realName := r.prefix + name
+	return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+	r.underlying.RunHealthchecks()
+}
+
+// GetAll metrics in the Registry
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+	return r.underlying.GetAll()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+	realName := r.prefix + name
+	r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+	r.underlying.UnregisterAll()
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+	DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+	return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+	return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+	return DefaultRegistry.Register(name, i)
+}
+
+// MustRegister registers the given metric under the given name.  Panics if
+// a metric by the given name is already registered.
+func MustRegister(name string, i interface{}) {
+	if err := Register(name, i); err != nil {
+		panic(err)
+	}
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+	DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+	DefaultRegistry.Unregister(name)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 0000000..11c6b78
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,212 @@
+package metrics
+
+import (
+	"runtime"
+	"runtime/pprof"
+	"time"
+)
+
+var (
+	memStats       runtime.MemStats
+	runtimeMetrics struct {
+		MemStats struct {
+			Alloc         Gauge
+			BuckHashSys   Gauge
+			DebugGC       Gauge
+			EnableGC      Gauge
+			Frees         Gauge
+			HeapAlloc     Gauge
+			HeapIdle      Gauge
+			HeapInuse     Gauge
+			HeapObjects   Gauge
+			HeapReleased  Gauge
+			HeapSys       Gauge
+			LastGC        Gauge
+			Lookups       Gauge
+			Mallocs       Gauge
+			MCacheInuse   Gauge
+			MCacheSys     Gauge
+			MSpanInuse    Gauge
+			MSpanSys      Gauge
+			NextGC        Gauge
+			NumGC         Gauge
+			GCCPUFraction GaugeFloat64
+			PauseNs       Histogram
+			PauseTotalNs  Gauge
+			StackInuse    Gauge
+			StackSys      Gauge
+			Sys           Gauge
+			TotalAlloc    Gauge
+		}
+		NumCgoCall   Gauge
+		NumGoroutine Gauge
+		NumThread    Gauge
+		ReadMemStats Timer
+	}
+	frees       uint64
+	lookups     uint64
+	mallocs     uint64
+	numGC       uint32
+	numCgoCalls int64
+
+	threadCreateProfile = pprof.Lookup("threadcreate")
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats.  This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+	for range time.Tick(d) {
+		CaptureRuntimeMemStatsOnce(r)
+	}
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats.  This is designed to be called in a background
+// goroutine.  Passing a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+	t := time.Now()
+	runtime.ReadMemStats(&memStats) // This takes 50-200us.
+	runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+	runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+	runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+	if memStats.DebugGC {
+		runtimeMetrics.MemStats.DebugGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.DebugGC.Update(0)
+	}
+	if memStats.EnableGC {
+		runtimeMetrics.MemStats.EnableGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.EnableGC.Update(0)
+	}
+
+	runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+	runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+	runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+	runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+	runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+	runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+	runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+	runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+	runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+	runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+	runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+	runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+	runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+	runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+	runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+	runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+	runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+	// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+	i := numGC % uint32(len(memStats.PauseNs))
+	ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+	if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+		for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	} else {
+		if i > ii {
+			for ; i < uint32(len(memStats.PauseNs)); i++ {
+				runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+			}
+			i = 0
+		}
+		for ; i < ii; i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	}
+	frees = memStats.Frees
+	lookups = memStats.Lookups
+	mallocs = memStats.Mallocs
+	numGC = memStats.NumGC
+
+	runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+	runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+	runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+	runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+	runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+	currentNumCgoCalls := numCgoCall()
+	runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+	numCgoCalls = currentNumCgoCalls
+
+	runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+	runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats.  The runtimeMetrics are named by their
+// fully-qualified Go symbols, e.g. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+	runtimeMetrics.MemStats.Alloc = NewGauge()
+	runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+	runtimeMetrics.MemStats.DebugGC = NewGauge()
+	runtimeMetrics.MemStats.EnableGC = NewGauge()
+	runtimeMetrics.MemStats.Frees = NewGauge()
+	runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+	runtimeMetrics.MemStats.HeapIdle = NewGauge()
+	runtimeMetrics.MemStats.HeapInuse = NewGauge()
+	runtimeMetrics.MemStats.HeapObjects = NewGauge()
+	runtimeMetrics.MemStats.HeapReleased = NewGauge()
+	runtimeMetrics.MemStats.HeapSys = NewGauge()
+	runtimeMetrics.MemStats.LastGC = NewGauge()
+	runtimeMetrics.MemStats.Lookups = NewGauge()
+	runtimeMetrics.MemStats.Mallocs = NewGauge()
+	runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+	runtimeMetrics.MemStats.MCacheSys = NewGauge()
+	runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+	runtimeMetrics.MemStats.MSpanSys = NewGauge()
+	runtimeMetrics.MemStats.NextGC = NewGauge()
+	runtimeMetrics.MemStats.NumGC = NewGauge()
+	runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+	runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+	runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+	runtimeMetrics.MemStats.StackInuse = NewGauge()
+	runtimeMetrics.MemStats.StackSys = NewGauge()
+	runtimeMetrics.MemStats.Sys = NewGauge()
+	runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+	runtimeMetrics.NumCgoCall = NewGauge()
+	runtimeMetrics.NumGoroutine = NewGauge()
+	runtimeMetrics.NumThread = NewGauge()
+	runtimeMetrics.ReadMemStats = NewTimer()
+
+	r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+	r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+	r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+	r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+	r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+	r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+	r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+	r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+	r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+	r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+	r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+	r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+	r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+	r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+	r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+	r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+	r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+	r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+	r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+	r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+	r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+	r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+	r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+	r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+	r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+	r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+	r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+	r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+	r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+	r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+	r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
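+
+// Example (illustrative sketch, not part of the upstream source): register
+// the runtime metrics once, then sample them on an interval from a
+// background goroutine.
+//
+//	RegisterRuntimeMemStats(DefaultRegistry)
+//	go CaptureRuntimeMemStats(DefaultRegistry, 5*time.Second)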
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 0000000..e3391f4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+	return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 0000000..ca12c05
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 0000000..616a3b4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 0000000..be96aa6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 0000000..fecee5e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Size() int
+	Snapshot() Sample
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Values() []int64
+	Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
+type ExpDecaySample struct {
+	alpha         float64
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	t0, t1        time.Time
+	values        *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	s := &ExpDecaySample{
+		alpha:         alpha,
+		reservoirSize: reservoirSize,
+		t0:            time.Now(),
+		values:        newExpDecaySampleHeap(reservoirSize),
+	}
+	s.t1 = s.t0.Add(rescaleThreshold)
+	return s
+}
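+
+// Example (illustrative sketch, not part of the upstream source): the
+// parameters used throughout this library, 1028 and 0.015, match the
+// reservoir behind UNIX load averages and heavily bias the sample toward
+// roughly the most recent five minutes of values.
+//
+//	h := NewHistogram(NewExpDecaySample(1028, 0.015))
+//	h.Update(42)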
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.t0 = time.Now()
+	s.t1 = s.t0.Add(rescaleThreshold)
+	s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+	return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+	return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+	return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+	return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+	return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+	return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+	s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+	return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp.  This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if s.values.Size() == s.reservoirSize {
+		s.values.Pop()
+	}
+	s.values.Push(expDecaySample{
+		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+		v: v,
+	})
+	if t.After(s.t1) {
+		values := s.values.Values()
+		t0 := s.t0
+		s.values.Clear()
+		s.t0 = t
+		s.t1 = s.t0.Add(rescaleThreshold)
+		for _, v := range values {
+			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+			s.values.Push(v)
+		}
+	}
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
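+
+// Worked example (editorial, not part of the upstream source): for the
+// values [10, 20, 30, 40] and p = 0.5, pos = 0.5*(4+1) = 2.5, so the
+// result interpolates halfway between values[1] and values[2]:
+// 20 + 0.5*(30-20) = 25.
+//
+//	_ = SamplePercentile(int64Slice{10, 20, 30, 40}, 0.5) // 25.0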
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+	panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+	return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+	var sum int64
+	for _, v := range values {
+		sum += v
+	}
+	return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	m := SampleMean(values)
+	var sum float64
+	for _, v := range values {
+		d := float64(v) - m
+		sum += d * d
+	}
+	return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+//
+// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
+type UniformSample struct {
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	values        []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	return &UniformSample{
+		reservoirSize: reservoirSize,
+		values:        make([]int64, 0, reservoirSize),
+	}
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if len(s.values) < s.reservoirSize {
+		s.values = append(s.values, v)
+	} else {
+		r := rand.Int63n(s.count)
+		if r < int64(len(s.values)) {
+			s.values[int(r)] = v
+		}
+	}
+}
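+
+// Note (editorial, not part of the upstream source): this is Vitter's
+// Algorithm R. Once the reservoir is full, the n-th value replaces a
+// random slot with probability reservoirSize/n, which leaves every value
+// seen so far in the sample with equal probability.
+//
+//	s := NewUniformSample(1028)
+//	for i := int64(0); i < 10000; i++ {
+//		s.Update(i) // each value survives with probability 1028/10000
+//	}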
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+	k float64
+	v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap.
+type expDecaySampleHeap struct {
+	s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+	h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+	n := len(h.s)
+	h.s = h.s[0 : n+1]
+	h.s[n] = s
+	h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+	n := len(h.s) - 1
+	h.s[0], h.s[n] = h.s[n], h.s[0]
+	h.down(0, n)
+
+	n = len(h.s)
+	s := h.s[n-1]
+	h.s = h.s[0 : n-1]
+	return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+	return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+	return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+	for {
+		i := (j - 1) / 2 // parent
+		if i == j || !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		j = i
+	}
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+	for {
+		j1 := 2*i + 1
+		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+			break
+		}
+		j := j1 // left child
+		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+			j = j2 // = 2*i + 2  // right child
+		}
+		if !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		i = j
+	}
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 0000000..693f190
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+	"fmt"
+	"log/syslog"
+	"time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+	for range time.Tick(d) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+			case Gauge:
+				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+			case GaugeFloat64:
+				w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+			case Healthcheck:
+				metric.Check()
+				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+					name,
+					h.Count(),
+					h.Min(),
+					h.Max(),
+					h.Mean(),
+					h.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+				))
+			case Meter:
+				m := metric.Snapshot()
+				w.Info(fmt.Sprintf(
+					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+					name,
+					m.Count(),
+					m.Rate1(),
+					m.Rate5(),
+					m.Rate15(),
+					m.RateMean(),
+				))
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+					name,
+					t.Count(),
+					t.Min(),
+					t.Max(),
+					t.Mean(),
+					t.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+					t.Rate1(),
+					t.Rate5(),
+					t.Rate15(),
+					t.RateMean(),
+				))
+			}
+		})
+	}
+}
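+
+// Example (illustrative sketch, not part of the upstream source; the tag
+// "myapp" is hypothetical):
+//
+//	w, err := syslog.New(syslog.LOG_INFO, "myapp")
+//	if err == nil {
+//		go Syslog(DefaultRegistry, 60*time.Second, w)
+//	}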
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 0000000..d6ec4c6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Timer
+	StdDev() float64
+	Stop()
+	Sum() int64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+	Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: h,
+		meter:     m,
+	}
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+	c := NewTimer()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewTimer() Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+		meter:     NewMeter(),
+	}
+}
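+
+// Example (illustrative sketch, not part of the upstream source): one
+// timer feeds both its histogram (durations) and its meter (rates).
+//
+//	t := GetOrRegisterTimer("handler.latency", nil)
+//	t.Time(func() { /* timed work */ })
+//	_ = t.Percentile(0.95) // 95th-percentile duration, in nanoseconds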
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+	h Histogram
+	m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+	histogram Histogram
+	meter     Meter
+	mutex     sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+	return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+	return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+	return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+	return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+	return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+	return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+	return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+	return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return &TimerSnapshot{
+		histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+		meter:     t.meter.Snapshot().(*MeterSnapshot),
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+	return t.histogram.StdDev()
+}
+
+// Stop stops the meter.
+func (t *StandardTimer) Stop() {
+	t.meter.Stop()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+	return t.histogram.Sum()
+}
+
+// Record the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+	ts := time.Now()
+	f()
+	t.Update(time.Since(ts))
+}
+
+// Record the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(d))
+	t.meter.Mark(1)
+}
+
+// Record the duration of an event that started at a time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(time.Since(ts)))
+	t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+	return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+	histogram *HistogramSnapshot
+	meter     *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100755
index 0000000..c4ae91e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 0000000..091e971
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to the
+// given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+	var namedMetrics namedMetricSlice
+	r.Each(func(name string, i interface{}) {
+		namedMetrics = append(namedMetrics, namedMetric{name, i})
+	})
+
+	sort.Sort(namedMetrics)
+	for _, namedMetric := range namedMetrics {
+		switch metric := namedMetric.m.(type) {
+		case Counter:
+			fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", metric.Count())
+		case Gauge:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %9d\n", metric.Value())
+		case GaugeFloat64:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %f\n", metric.Value())
+		case Healthcheck:
+			metric.Check()
+			fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  error:       %v\n", metric.Error())
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", h.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", h.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", h.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", h.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", h.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", m.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", t.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", t.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", t.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", t.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", t.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", t.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", t.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", t.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", t.RateMean())
+		}
+	}
+}
+
+type namedMetric struct {
+	name string
+	m    interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+	return nms[i].name < nms[j].name
+}
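+
+// Editorial note: a minimal client-side usage sketch (not part of the
+// upstream file), assuming this package's NewCounter, Register, and
+// DefaultRegistry helpers:
+//
+//	c := metrics.NewCounter()
+//	metrics.Register("requests", c)
+//	c.Inc(47)
+//
+//	// Dump every registered metric to stdout once per second.
+//	go metrics.Write(metrics.DefaultRegistry, 1*time.Second, os.Stdout)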
diff --git a/vendor/github.com/rs/zerolog/.gitignore b/vendor/github.com/rs/zerolog/.gitignore
new file mode 100644
index 0000000..8ebe58b
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+tmp
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/rs/zerolog/.travis.yml b/vendor/github.com/rs/zerolog/.travis.yml
new file mode 100644
index 0000000..64d202a
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+go:
+- "1.7"
+- "1.8"
+- "1.9"
+- "1.10"
+- "master"
+matrix:
+  allow_failures:
+      - go: "master"
+script:
+  - go test -v -race -cpu=1,2,4 -bench . -benchmem ./...
+  - go test -v -tags binary_log -race -cpu=1,2,4 -bench . -benchmem ./...
diff --git a/vendor/github.com/rs/zerolog/CNAME b/vendor/github.com/rs/zerolog/CNAME
new file mode 100644
index 0000000..9ce57a6
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/CNAME
@@ -0,0 +1 @@
+zerolog.io
\ No newline at end of file
diff --git a/vendor/github.com/rs/zerolog/LICENSE b/vendor/github.com/rs/zerolog/LICENSE
new file mode 100644
index 0000000..677e07f
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Olivier Poitrey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md
new file mode 100644
index 0000000..eefc1f6
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/README.md
@@ -0,0 +1,591 @@
+# Zero Allocation JSON Logger
+
+[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog)
+
+The zerolog package provides a fast and simple logger dedicated to JSON output.
+
+Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection.
+
+Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance.
+
+To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging).
+
+![Pretty Logging Image](pretty.png)
+
+## Who uses zerolog
+
+Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list.
+
+## Features
+
+* Blazing fast
+* Low to zero allocation
+* Level logging
+* Sampling
+* Hooks
+* Contextual fields
+* `context.Context` integration
+* `net/http` helpers
+* JSON and CBOR encoding formats
+* Pretty logging for development
+
+## Installation
+
+```bash
+go get -u github.com/rs/zerolog/log
+```
+
+## Getting Started
+
+### Simple Logging Example
+
+For simple logging, import the global logger package **github.com/rs/zerolog/log**
+
+```go
+package main
+
+import (
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    // UNIX Time is faster and smaller than most timestamps
+    // If you set zerolog.TimeFieldFormat to an empty string,
+    // logs will write with UNIX time
+    zerolog.TimeFieldFormat = ""
+
+    log.Print("hello world")
+}
+
+// Output: {"time":1516134303,"level":"debug","message":"hello world"}
+```
+> Note: By default log writes to `os.Stderr`.
+>
+> Note: The default log level for `log.Print` is *debug*.
+
+### Contextual Logging
+
+**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below:
+
+```go
+package main
+
+import (
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    zerolog.TimeFieldFormat = ""
+
+    log.Debug().
+        Str("Scale", "833 cents").
+        Float64("Interval", 833.09).
+        Msg("Fibonacci is everywhere")
+}
+
+// Output: {"time":1524104936,"level":"debug","Scale":"833 cents","Interval":833.09,"message":"Fibonacci is everywhere"}
+```
+
+> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types).
+
+### Leveled Logging
+
+#### Simple Leveled Logging Example
+
+```go
+package main
+
+import (
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    zerolog.TimeFieldFormat = ""
+
+    log.Info().Msg("hello world")
+}
+
+// Output: {"time":1516134303,"level":"info","message":"hello world"}
+```
+
+> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world")`), the chain must be finalized with either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile-time error to alert you of this.
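+
+For illustration, a minimal sketch of the pitfall described above:
+
+```go
+// Nothing is written: the chain is never finalized with Msg or Msgf.
+log.Info().Str("foo", "bar")
+
+// Correct: finalizing the chain emits the event.
+log.Info().Str("foo", "bar").Msg("")
+```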
+
+**zerolog** allows for logging at the following levels (from highest to lowest):
+
+* panic (`zerolog.PanicLevel`, 5)
+* fatal (`zerolog.FatalLevel`, 4)
+* error (`zerolog.ErrorLevel`, 3)
+* warn (`zerolog.WarnLevel`, 2)
+* info (`zerolog.InfoLevel`, 1)
+* debug (`zerolog.DebugLevel`, 0)
+
+You can set the global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing one of the constants above, e.g. `zerolog.InfoLevel` for the "info" level. Whichever level is chosen, all logs with a level greater than or equal to it will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant.
+
+#### Setting Global Log Level
+
+This example uses command-line flags to demonstrate various outputs depending on the chosen log level.
+
+```go
+package main
+
+import (
+    "flag"
+
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    zerolog.TimeFieldFormat = ""
+    debug := flag.Bool("debug", false, "sets log level to debug")
+
+    flag.Parse()
+
+    // Default level for this example is info, unless debug flag is present
+    zerolog.SetGlobalLevel(zerolog.InfoLevel)
+    if *debug {
+        zerolog.SetGlobalLevel(zerolog.DebugLevel)
+    }
+
+    log.Debug().Msg("This message appears only when log level set to Debug")
+    log.Info().Msg("This message appears when log level set to Debug or Info")
+
+    if e := log.Debug(); e.Enabled() {
+        // Compute log output only if enabled.
+        value := "bar"
+        e.Str("foo", value).Msg("some debug message")
+    }
+}
+```
+
+Info Output (no flag)
+
+```bash
+$ ./logLevelExample
+{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"}
+```
+
+Debug Output (debug flag set)
+
+```bash
+$ ./logLevelExample -debug
+{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"}
+{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"}
+{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"}
+```
+
+#### Logging without Level or Message
+
+You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below.
+
+```go
+package main
+
+import (
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    zerolog.TimeFieldFormat = ""
+
+    log.Log().
+        Str("foo", "bar").
+        Msg("")
+}
+
+// Output: {"time":1494567715,"foo":"bar"}
+```
+
+#### Logging Fatal Messages
+
+```go
+package main
+
+import (
+    "errors"
+
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    err := errors.New("A repo man spends his life getting into tense situations")
+    service := "myservice"
+
+    zerolog.TimeFieldFormat = ""
+
+    log.Fatal().
+        Err(err).
+        Str("service", service).
+        Msgf("Cannot start %s", service)
+}
+
+// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"}
+//         exit status 1
+```
+
+> NOTE: Using `Msgf` generates one allocation even when the logger is disabled.
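+
+A hedged sketch of the difference (the field and variable names are illustrative):
+
+```go
+// Msgf formats its arguments eagerly, so it allocates even when the
+// logger is disabled:
+log.Debug().Msgf("cannot start %s", service)
+
+// A structured field with a constant message avoids that allocation:
+log.Debug().Str("service", service).Msg("cannot start service")
+```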
+
+### Create logger instance to manage different outputs
+
+```go
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+
+logger.Info().Str("foo", "bar").Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"}
+```
+
+### Sub-loggers let you chain loggers with additional context
+
+```go
+sublogger := log.With().
+                 Str("component", "foo").
+                 Logger()
+sublogger.Info().Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"}
+```
+
+### Pretty logging
+
+To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`:
+
+```go
+log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
+
+log.Info().Str("foo", "bar").Msg("Hello world")
+
+// Output: 3:04PM INF Hello world foo=bar
+```
+
+To customize the configuration and formatting:
+
+```go
+output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}
+output.FormatLevel = func(i interface{}) string {
+    return strings.ToUpper(fmt.Sprintf("| %-6s|", i))
+}
+output.FormatMessage = func(i interface{}) string {
+    return fmt.Sprintf("***%s****", i)
+}
+output.FormatFieldName = func(i interface{}) string {
+    return fmt.Sprintf("%s:", i)
+}
+output.FormatFieldValue = func(i interface{}) string {
+    return strings.ToUpper(fmt.Sprintf("%s", i))
+}
+
+log := zerolog.New(output).With().Timestamp().Logger()
+
+log.Info().Str("foo", "bar").Msg("Hello World")
+
+// Output: 2006-01-02T15:04:05Z07:00 | INFO  | ***Hello World**** foo:BAR
+```
+
+### Sub dictionary
+
+```go
+log.Info().
+    Str("foo", "bar").
+    Dict("dict", zerolog.Dict().
+        Str("bar", "baz").
+        Int("n", 1),
+    ).Msg("hello world")
+
+// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"}
+```
+
+### Customize automatic field names
+
+```go
+zerolog.TimestampFieldName = "t"
+zerolog.LevelFieldName = "l"
+zerolog.MessageFieldName = "m"
+
+log.Info().Msg("hello world")
+
+// Output: {"l":"info","t":1494567715,"m":"hello world"}
+```
+
+### Add contextual fields to the global logger
+
+```go
+log.Logger = log.With().Str("foo", "bar").Logger()
+```
+
+### Add file and line number to log
+
+```go
+log.Logger = log.With().Caller().Logger()
+log.Info().Msg("hello world")
+
+// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"}
+```
+
+
+### Thread-safe, lock-free, non-blocking writer
+
+If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows:
+
+```go
+wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) {
+    fmt.Printf("Logger Dropped %d messages", missed)
+})
+log := zerolog.New(wr)
+log.Print("test")
+```
+
+You will need to install `code.cloudfoundry.org/go-diodes` to use this feature.
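+
+For example, with the standard Go toolchain:
+
+```bash
+go get -u code.cloudfoundry.org/go-diodes
+```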
+
+### Log Sampling
+
+```go
+sampled := log.Sample(&zerolog.BasicSampler{N: 10})
+sampled.Info().Msg("will be logged every 10 messages")
+
+// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"}
+```
+
+More advanced sampling:
+
+```go
+// Allow 5 debug messages per period of 1 second.
+// Beyond that burst, only 1 in every 100 debug messages is logged.
+// Other levels are not sampled.
+sampled := log.Sample(zerolog.LevelSampler{
+    DebugSampler: &zerolog.BurstSampler{
+        Burst: 5,
+        Period: 1*time.Second,
+        NextSampler: &zerolog.BasicSampler{N: 100},
+    },
+})
+sampled.Debug().Msg("hello world")
+
+// Output: {"time":1494567715,"level":"debug","message":"hello world"}
+```
+
+### Hooks
+
+```go
+type SeverityHook struct{}
+
+func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+    if level != zerolog.NoLevel {
+        e.Str("severity", level.String())
+    }
+}
+
+hooked := log.Hook(SeverityHook{})
+hooked.Warn().Msg("")
+
+// Output: {"level":"warn","severity":"warn"}
+```
+
+### Pass a sub-logger by context
+
+```go
+ctx := log.With().Str("component", "module").Logger().WithContext(ctx)
+
+log.Ctx(ctx).Info().Msg("hello world")
+
+// Output: {"component":"module","level":"info","message":"hello world"}
+```
+
+### Set as standard logger output
+
+```go
+log := zerolog.New(os.Stdout).With().
+    Str("foo", "bar").
+    Logger()
+
+stdlog.SetFlags(0)
+stdlog.SetOutput(log)
+
+stdlog.Print("hello world")
+
+// Output: {"foo":"bar","message":"hello world"}
+```
+
+### Integration with `net/http`
+
+The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`.
+
+In this example we use [alice](https://github.com/justinas/alice) to install the logger middleware, for better readability.
+
+```go
+log := zerolog.New(os.Stdout).With().
+    Timestamp().
+    Str("role", "my-service").
+    Str("host", host).
+    Logger()
+
+c := alice.New()
+
+// Install the logger handler with default output on the console
+c = c.Append(hlog.NewHandler(log))
+
+// Install some provided extra handlers to set request context fields.
+// Thanks to these handlers, all our logs will come with pre-populated fields.
+c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) {
+    hlog.FromRequest(r).Info().
+        Str("method", r.Method).
+        Str("url", r.URL.String()).
+        Int("status", status).
+        Int("size", size).
+        Dur("duration", duration).
+        Msg("")
+}))
+c = c.Append(hlog.RemoteAddrHandler("ip"))
+c = c.Append(hlog.UserAgentHandler("user_agent"))
+c = c.Append(hlog.RefererHandler("referer"))
+c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id"))
+
+// Here is your final handler
+h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+    // Get the logger from the request's context. You can safely assume it
+    // will always be there: if the handler is removed, hlog.FromRequest
+    // will return a no-op logger.
+    hlog.FromRequest(r).Info().
+        Str("user", "current user").
+        Str("status", "ok").
+        Msg("Something happened")
+
+    // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"}
+}))
+http.Handle("/", h)
+
+if err := http.ListenAndServe(":8080", nil); err != nil {
+    log.Fatal().Err(err).Msg("Startup failed")
+}
+```
+
+## Global Settings
+
+Some settings can be changed and will be applied to all loggers; a short sketch follows the list:
+
+* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods).
+* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Set this to `zerolog.Disabled` to disable logging altogether (quiet mode).
+* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
+* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
+* `zerolog.LevelFieldName`: Can be set to customize level field name.
+* `zerolog.MessageFieldName`: Can be set to customize message field name.
+* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
+* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set to an empty string, times are formatted as UNIX timestamps.
+* `zerolog.DurationFieldUnit`: Sets the unit of the fields added by `Dur` (default: `time.Millisecond`).
+* `zerolog.DurationFieldInteger`: If set to true, `Dur` fields are formatted as integers instead of floats.
+* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event to its output. If not set, an error is printed to stderr. This handler must be thread-safe and non-blocking.
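+
+As a short sketch of tweaking a few of these (the values are only illustrative):
+
+```go
+package main
+
+import (
+    "os"
+    "time"
+
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
+)
+
+func main() {
+    // Report Dur fields as integer milliseconds and silence anything
+    // below the warn level.
+    zerolog.DurationFieldUnit = time.Millisecond
+    zerolog.DurationFieldInteger = true
+    zerolog.SetGlobalLevel(zerolog.WarnLevel)
+
+    log.Logger = zerolog.New(os.Stderr).With().Timestamp().Logger()
+    log.Warn().Dur("elapsed", 1500*time.Millisecond).Msg("slow request")
+}
+```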
+
+## Field Types
+
+### Standard Types
+
+* `Str`
+* `Bool`
+* `Int`, `Int8`, `Int16`, `Int32`, `Int64`
+* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64`
+* `Float32`, `Float64`
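+
+A brief sketch combining the standard types above (the field names are illustrative):
+
+```go
+log.Info().
+    Str("name", "alice").
+    Bool("admin", true).
+    Int64("uid", 42).
+    Float64("ratio", 0.5).
+    Msg("user created")
+```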
+
+### Advanced Fields
+
+* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name.
+* `Timestamp`: Inserts a timestamp field with the `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`.
+* `Time`: Adds a field with the time formatted using `zerolog.TimeFieldFormat`.
+* `Dur`: Adds a field with a `time.Duration`.
+* `Dict`: Adds a sub-key/value as a field of the event.
+* `Interface`: Uses reflection to marshal the type (see the sketch below).
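+
+A short sketch combining several advanced fields (the field names are illustrative):
+
+```go
+log.Error().
+    Err(errors.New("connection reset")).
+    Time("observed", time.Now()).
+    Dur("backoff", 2*time.Second).
+    Dict("peer", zerolog.Dict().
+        Str("host", "example.com").
+        Int("port", 443),
+    ).
+    Msg("request failed")
+```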
+
+## Binary Encoding
+
+In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
+
+```bash
+go build -tags binary_log .
+```
+
+To decode binary-encoded log files you can use any CBOR decoder. One that has been
+tested to work with the zerolog library is [CSD](https://github.com/toravir/csd/).
+
+## Related Projects
+
+* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
+
+## Benchmarks
+
+See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.
+
+All operations are allocation free (those numbers *include* JSON encoding):
+
+```text
+BenchmarkLogEmpty-8        100000000    19.1 ns/op     0 B/op       0 allocs/op
+BenchmarkDisabled-8        500000000    4.07 ns/op     0 B/op       0 allocs/op
+BenchmarkInfo-8            30000000     42.5 ns/op     0 B/op       0 allocs/op
+BenchmarkContextFields-8   30000000     44.9 ns/op     0 B/op       0 allocs/op
+BenchmarkLogFields-8       10000000     184 ns/op      0 B/op       0 allocs/op
+```
+
+There are a few Go logging benchmarks and comparisons that include zerolog.
+
+* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
+* [uber-common/zap](https://github.com/uber-go/zap#performance)
+
+Using Uber's zap comparison benchmark:
+
+Log a message and 10 fields:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 767 ns/op | 552 B/op | 6 allocs/op |
+| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op |
+| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op |
+| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op |
+| lion | 5392 ns/op | 5807 B/op | 63 allocs/op |
+| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op |
+| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op |
+| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op |
+
+Log a message with a logger that already has 10 fields of context:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 52 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
+| lion | 2702 ns/op | 4074 B/op | 38 allocs/op |
+| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op |
+| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op |
+| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op |
+| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op |
+
+Log a static string, without any context or `printf`-style templating:
+
+| Library | Time | Bytes Allocated | Objects Allocated |
+| :--- | :---: | :---: | :---: |
+| zerolog | 50 ns/op | 0 B/op | 0 allocs/op |
+| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op |
+| standard library | 453 ns/op | 80 B/op | 2 allocs/op |
+| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op |
+| go-kit | 508 ns/op | 656 B/op | 13 allocs/op |
+| lion | 771 ns/op | 1224 B/op | 10 allocs/op |
+| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op |
+| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
+| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |
+
+## Caveats
+
+Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in the final JSON:
+
+```go
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+logger.Info().
+       Timestamp().
+       Msg("dup")
+// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+```
+
+However, it’s usually not a big deal, as most JSON parsers accept duplicate keys and let the last one prevail.
diff --git a/vendor/github.com/rs/zerolog/_config.yml b/vendor/github.com/rs/zerolog/_config.yml
new file mode 100644
index 0000000..a1e896d
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/_config.yml
@@ -0,0 +1 @@
+remote_theme: rs/gh-readme
diff --git a/vendor/github.com/rs/zerolog/array.go b/vendor/github.com/rs/zerolog/array.go
new file mode 100644
index 0000000..aa4f623
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/array.go
@@ -0,0 +1,224 @@
+package zerolog
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+var arrayPool = &sync.Pool{
+	New: func() interface{} {
+		return &Array{
+			buf: make([]byte, 0, 500),
+		}
+	},
+}
+
+// Array is used to prepopulate an array of items
+// which can be re-used to add to log messages.
+type Array struct {
+	buf []byte
+}
+
+func putArray(a *Array) {
+	// Proper usage of a sync.Pool requires each entry to have approximately
+	// the same memory cost. To obtain this property when the stored type
+	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
+	// to place back in the pool.
+	//
+	// See https://golang.org/issue/23199
+	const maxSize = 1 << 16 // 64KiB
+	if cap(a.buf) > maxSize {
+		return
+	}
+	arrayPool.Put(a)
+}
+
+// Arr creates an array to be added to an Event or Context.
+func Arr() *Array {
+	a := arrayPool.Get().(*Array)
+	a.buf = a.buf[:0]
+	return a
+}
+
+// MarshalZerologArray is a no-op here, since the data is
+// already in the needed format.
+func (*Array) MarshalZerologArray(*Array) {
+}
+
+func (a *Array) write(dst []byte) []byte {
+	dst = enc.AppendArrayStart(dst)
+	if len(a.buf) > 0 {
+		dst = append(dst, a.buf...)
+	}
+	dst = enc.AppendArrayEnd(dst)
+	putArray(a)
+	return dst
+}
+
+// Object marshals an object that implements the LogObjectMarshaler
+// interface and appends it to the array.
+func (a *Array) Object(obj LogObjectMarshaler) *Array {
+	e := Dict()
+	obj.MarshalZerologObject(e)
+	e.buf = enc.AppendEndMarker(e.buf)
+	a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
+	putEvent(e)
+	return a
+}
+
+// Str appends val as a string to the array.
+func (a *Array) Str(val string) *Array {
+	a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
+	return a
+}
+
+// Bytes appends val as a string to the array.
+func (a *Array) Bytes(val []byte) *Array {
+	a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
+	return a
+}
+
+// Hex appends val as a hex string to the array.
+func (a *Array) Hex(val []byte) *Array {
+	a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
+	return a
+}
+
+// Err serializes and appends the err to the array.
+func (a *Array) Err(err error) *Array {
+	marshaled := ErrorMarshalFunc(err)
+	switch m := marshaled.(type) {
+	case LogObjectMarshaler:
+		e := newEvent(nil, 0)
+		e.buf = e.buf[:0]
+		e.appendObject(m)
+		a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
+		putEvent(e)
+	case error:
+		a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error())
+	case string:
+		a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m)
+	default:
+		a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m)
+	}
+
+	return a
+}
+
+// Bool appends b as a bool to the array.
+func (a *Array) Bool(b bool) *Array {
+	a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
+	return a
+}
+
+// Int appends i as an int to the array.
+func (a *Array) Int(i int) *Array {
+	a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Int8 appends i as an int8 to the array.
+func (a *Array) Int8(i int8) *Array {
+	a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Int16 appends i as an int16 to the array.
+func (a *Array) Int16(i int16) *Array {
+	a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Int32 appends i as an int32 to the array.
+func (a *Array) Int32(i int32) *Array {
+	a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Int64 appends i as an int64 to the array.
+func (a *Array) Int64(i int64) *Array {
+	a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Uint appends i as a uint to the array.
+func (a *Array) Uint(i uint) *Array {
+	a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Uint8 appends i as a uint8 to the array.
+func (a *Array) Uint8(i uint8) *Array {
+	a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Uint16 appends i as a uint16 to the array.
+func (a *Array) Uint16(i uint16) *Array {
+	a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Uint32 appends i as a uint32 to the array.
+func (a *Array) Uint32(i uint32) *Array {
+	a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Uint64 appends i as a uint64 to the array.
+func (a *Array) Uint64(i uint64) *Array {
+	a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// Float32 appends f as a float32 to the array.
+func (a *Array) Float32(f float32) *Array {
+	a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
+	return a
+}
+
+// Float64 appends f as a float64 to the array.
+func (a *Array) Float64(f float64) *Array {
+	a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
+	return a
+}
+
+// Time appends t, formatted as a string using zerolog.TimeFieldFormat, to the array.
+func (a *Array) Time(t time.Time) *Array {
+	a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
+	return a
+}
+
+// Dur appends d to the array.
+func (a *Array) Dur(d time.Duration) *Array {
+	a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
+	return a
+}
+
+// Interface appends i, marshaled using reflection, to the array.
+func (a *Array) Interface(i interface{}) *Array {
+	if obj, ok := i.(LogObjectMarshaler); ok {
+		return a.Object(obj)
+	}
+	a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// IPAddr adds an IPv4 or IPv6 address to the array.
+func (a *Array) IPAddr(ip net.IP) *Array {
+	a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip)
+	return a
+}
+
+// IPPrefix adds an IPv4 or IPv6 prefix (IP + mask) to the array.
+func (a *Array) IPPrefix(pfx net.IPNet) *Array {
+	a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx)
+	return a
+}
+
+// MACAddr adds a MAC (Ethernet) address to the array.
+func (a *Array) MACAddr(ha net.HardwareAddr) *Array {
+	a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha)
+	return a
+}
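+
+// Editorial note: a minimal client-side usage sketch (not part of the
+// upstream file) attaching an Array to an event:
+//
+//	log.Info().
+//		Array("ips", zerolog.Arr().Str("10.0.0.1").Str("10.0.0.2")).
+//		Msg("peers")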
diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go
new file mode 100644
index 0000000..f25bc32
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/console.go
@@ -0,0 +1,369 @@
+package zerolog
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	colorBold = iota + 1
+	colorFaint
+)
+
+const (
+	colorBlack = iota + 30
+	colorRed
+	colorGreen
+	colorYellow
+	colorBlue
+	colorMagenta
+	colorCyan
+	colorWhite
+)
+
+var (
+	consoleBufPool = sync.Pool{
+		New: func() interface{} {
+			return bytes.NewBuffer(make([]byte, 0, 100))
+		},
+	}
+
+	consoleDefaultTimeFormat = time.Kitchen
+	consoleDefaultFormatter  = func(i interface{}) string { return fmt.Sprintf("%s", i) }
+	consoleDefaultPartsOrder = func() []string {
+		return []string{
+			TimestampFieldName,
+			LevelFieldName,
+			CallerFieldName,
+			MessageFieldName,
+		}
+	}
+
+	consoleNoColor    = false
+	consoleTimeFormat = consoleDefaultTimeFormat
+)
+
+// Formatter transforms the input into a formatted string.
+type Formatter func(interface{}) string
+
+// ConsoleWriter parses the JSON input and writes it in an
+// (optionally) colorized, human-friendly format to Out.
+type ConsoleWriter struct {
+	// Out is the output destination.
+	Out io.Writer
+
+	// NoColor disables the colorized output.
+	NoColor bool
+
+	// TimeFormat specifies the format for timestamp in output.
+	TimeFormat string
+
+	// PartsOrder defines the order of parts in output.
+	PartsOrder []string
+
+	FormatTimestamp     Formatter
+	FormatLevel         Formatter
+	FormatCaller        Formatter
+	FormatMessage       Formatter
+	FormatFieldName     Formatter
+	FormatFieldValue    Formatter
+	FormatErrFieldName  Formatter
+	FormatErrFieldValue Formatter
+}
+
+// NewConsoleWriter creates and initializes a new ConsoleWriter.
+func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter {
+	w := ConsoleWriter{
+		Out:        os.Stdout,
+		TimeFormat: consoleDefaultTimeFormat,
+		PartsOrder: consoleDefaultPartsOrder(),
+	}
+
+	for _, opt := range options {
+		opt(&w)
+	}
+
+	return w
+}
+
+// Write transforms the JSON input with formatters and writes it to w.Out.
+func (w ConsoleWriter) Write(p []byte) (n int, err error) {
+	if w.PartsOrder == nil {
+		w.PartsOrder = consoleDefaultPartsOrder()
+	}
+	// Sync the package-level formatting globals with this writer's settings.
+	if w.TimeFormat == "" {
+		consoleTimeFormat = consoleDefaultTimeFormat
+	} else {
+		consoleTimeFormat = w.TimeFormat
+	}
+	consoleNoColor = w.NoColor
+
+	var buf = consoleBufPool.Get().(*bytes.Buffer)
+	defer consoleBufPool.Put(buf)
+
+	var evt map[string]interface{}
+	p = decodeIfBinaryToBytes(p)
+	d := json.NewDecoder(bytes.NewReader(p))
+	d.UseNumber()
+	err = d.Decode(&evt)
+	if err != nil {
+		return n, fmt.Errorf("cannot decode event: %s", err)
+	}
+
+	for _, p := range w.PartsOrder {
+		w.writePart(buf, evt, p)
+	}
+
+	w.writeFields(evt, buf)
+
+	buf.WriteByte('\n')
+	buf.WriteTo(w.Out)
+	return len(p), nil
+}
+
+// writeFields appends formatted key-value pairs to buf.
+func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) {
+	var fields = make([]string, 0, len(evt))
+	for field := range evt {
+		switch field {
+		case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName:
+			continue
+		}
+		fields = append(fields, field)
+	}
+	sort.Strings(fields)
+
+	if len(fields) > 0 {
+		buf.WriteByte(' ')
+	}
+
+	// Move the "error" field to the front
+	ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName })
+	if ei < len(fields) && fields[ei] == ErrorFieldName {
+		fields[ei] = ""
+		fields = append([]string{ErrorFieldName}, fields...)
+		var xfields = make([]string, 0, len(fields))
+		for _, field := range fields {
+			if field == "" { // Skip empty fields
+				continue
+			}
+			xfields = append(xfields, field)
+		}
+		fields = xfields
+	}
+
+	for i, field := range fields {
+		var fn Formatter
+		var fv Formatter
+
+		if field == ErrorFieldName {
+			if w.FormatErrFieldName == nil {
+				fn = consoleDefaultFormatErrFieldName
+			} else {
+				fn = w.FormatErrFieldName
+			}
+
+			if w.FormatErrFieldValue == nil {
+				fv = consoleDefaultFormatErrFieldValue
+			} else {
+				fv = w.FormatErrFieldValue
+			}
+		} else {
+			if w.FormatFieldName == nil {
+				fn = consoleDefaultFormatFieldName
+			} else {
+				fn = w.FormatFieldName
+			}
+
+			if w.FormatFieldValue == nil {
+				fv = consoleDefaultFormatFieldValue
+			} else {
+				fv = w.FormatFieldValue
+			}
+		}
+
+		buf.WriteString(fn(field))
+
+		switch fValue := evt[field].(type) {
+		case string:
+			if needsQuote(fValue) {
+				buf.WriteString(fv(strconv.Quote(fValue)))
+			} else {
+				buf.WriteString(fv(fValue))
+			}
+		case json.Number:
+			buf.WriteString(fv(fValue))
+		default:
+			b, err := json.Marshal(fValue)
+			if err != nil {
+				fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err)
+			} else {
+				fmt.Fprint(buf, fv(b))
+			}
+		}
+
+		if i < len(fields)-1 { // Skip space for last field
+			buf.WriteByte(' ')
+		}
+	}
+}
+
+// writePart appends a formatted part to buf.
+func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) {
+	var f Formatter
+
+	switch p {
+	case LevelFieldName:
+		if w.FormatLevel == nil {
+			f = consoleDefaultFormatLevel
+		} else {
+			f = w.FormatLevel
+		}
+	case TimestampFieldName:
+		if w.FormatTimestamp == nil {
+			f = consoleDefaultFormatTimestamp
+		} else {
+			f = w.FormatTimestamp
+		}
+	case MessageFieldName:
+		if w.FormatMessage == nil {
+			f = consoleDefaultFormatMessage
+		} else {
+			f = w.FormatMessage
+		}
+	case CallerFieldName:
+		if w.FormatCaller == nil {
+			f = consoleDefaultFormatCaller
+		} else {
+			f = w.FormatCaller
+		}
+	default:
+		if w.FormatFieldValue == nil {
+			f = consoleDefaultFormatFieldValue
+		} else {
+			f = w.FormatFieldValue
+		}
+	}
+
+	var s = f(evt[p])
+
+	if len(s) > 0 {
+		buf.WriteString(s)
+		if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part
+			buf.WriteByte(' ')
+		}
+	}
+}
+
+// needsQuote returns true when the string s should be quoted in output.
+func needsQuote(s string) bool {
+	for i := range s {
+		if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+// colorize returns the string s wrapped in ANSI code c, unless disabled is true.
+func colorize(s interface{}, c int, disabled bool) string {
+	if disabled {
+		return fmt.Sprintf("%s", s)
+	}
+	return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
+}
+
+// ----- DEFAULT FORMATTERS ---------------------------------------------------
+
+var (
+	consoleDefaultFormatTimestamp = func(i interface{}) string {
+		t := "<nil>"
+		switch tt := i.(type) {
+		case string:
+			ts, err := time.Parse(time.RFC3339, tt)
+			if err != nil {
+				t = tt
+			} else {
+				t = ts.Format(consoleTimeFormat)
+			}
+		case json.Number:
+			t = tt.String()
+		}
+		return colorize(t, colorFaint, consoleNoColor)
+	}
+
+	consoleDefaultFormatLevel = func(i interface{}) string {
+		var l string
+		if ll, ok := i.(string); ok {
+			switch ll {
+			case "debug":
+				l = colorize("DBG", colorYellow, consoleNoColor)
+			case "info":
+				l = colorize("INF", colorGreen, consoleNoColor)
+			case "warn":
+				l = colorize("WRN", colorRed, consoleNoColor)
+			case "error":
+				l = colorize(colorize("ERR", colorRed, consoleNoColor), colorBold, consoleNoColor)
+			case "fatal":
+				l = colorize(colorize("FTL", colorRed, consoleNoColor), colorBold, consoleNoColor)
+			case "panic":
+				l = colorize(colorize("PNC", colorRed, consoleNoColor), colorBold, consoleNoColor)
+			default:
+				l = colorize("???", colorBold, consoleNoColor)
+			}
+		} else {
+			l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
+		}
+		return l
+	}
+
+	consoleDefaultFormatCaller = func(i interface{}) string {
+		var c string
+		if cc, ok := i.(string); ok {
+			c = cc
+		}
+		if len(c) > 0 {
+			cwd, err := os.Getwd()
+			if err == nil {
+				c = strings.TrimPrefix(c, cwd)
+				c = strings.TrimPrefix(c, "/")
+			}
+			c = colorize(c, colorBold, consoleNoColor) + colorize(" >", colorFaint, consoleNoColor)
+		}
+		return c
+	}
+
+	consoleDefaultFormatMessage = func(i interface{}) string {
+		return fmt.Sprintf("%s", i)
+	}
+
+	consoleDefaultFormatFieldName = func(i interface{}) string {
+		return colorize(fmt.Sprintf("%s=", i), colorFaint, consoleNoColor)
+	}
+
+	consoleDefaultFormatFieldValue = func(i interface{}) string {
+		return fmt.Sprintf("%s", i)
+	}
+
+	consoleDefaultFormatErrFieldName = func(i interface{}) string {
+		return colorize(fmt.Sprintf("%s=", i), colorRed, consoleNoColor)
+	}
+
+	consoleDefaultFormatErrFieldValue = func(i interface{}) string {
+		return colorize(fmt.Sprintf("%s", i), colorRed, consoleNoColor)
+	}
+)
diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go
new file mode 100644
index 0000000..b843179
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/context.go
@@ -0,0 +1,381 @@
+package zerolog
+
+import (
+	"io/ioutil"
+	"net"
+	"time"
+)
+
+// Context configures a new sub-logger with contextual fields.
+type Context struct {
+	l Logger
+}
+
+// Logger returns the logger with the context previously set.
+func (c Context) Logger() Logger {
+	return c.l
+}
+
+// Fields is a helper function to use a map to set fields using type assertion.
+func (c Context) Fields(fields map[string]interface{}) Context {
+	c.l.context = appendFields(c.l.context, fields)
+	return c
+}
+
+// Dict adds the field key with the dict to the logger context.
+func (c Context) Dict(key string, dict *Event) Context {
+	dict.buf = enc.AppendEndMarker(dict.buf)
+	c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...)
+	putEvent(dict)
+	return c
+}
+
+// Array adds the field key with an array to the event context.
+// Use zerolog.Arr() to create the array or pass a type that
+// implements the LogArrayMarshaler interface.
+func (c Context) Array(key string, arr LogArrayMarshaler) Context {
+	c.l.context = enc.AppendKey(c.l.context, key)
+	if arr, ok := arr.(*Array); ok {
+		c.l.context = arr.write(c.l.context)
+		return c
+	}
+	// The *Array case returned above, so build a new array here.
+	a := Arr()
+	arr.MarshalZerologArray(a)
+	c.l.context = a.write(c.l.context)
+	return c
+}
+
+// Object marshals an object that implements the LogObjectMarshaler interface.
+func (c Context) Object(key string, obj LogObjectMarshaler) Context {
+	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
+	e.Object(key, obj)
+	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
+	putEvent(e)
+	return c
+}
+
+// EmbedObject marshals and embeds an object that implements the LogObjectMarshaler interface.
+func (c Context) EmbedObject(obj LogObjectMarshaler) Context {
+	e := newEvent(levelWriterAdapter{ioutil.Discard}, 0)
+	e.EmbedObject(obj)
+	c.l.context = enc.AppendObjectData(c.l.context, e.buf)
+	putEvent(e)
+	return c
+}
+
+// Str adds the field key with val as a string to the logger context.
+func (c Context) Str(key, val string) Context {
+	c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val)
+	return c
+}
+
+// Strs adds the field key with vals as a []string to the logger context.
+func (c Context) Strs(key string, vals []string) Context {
+	c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals)
+	return c
+}
+
+// Bytes adds the field key with val as a []byte to the logger context.
+func (c Context) Bytes(key string, val []byte) Context {
+	c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val)
+	return c
+}
+
+// Hex adds the field key with val as a hex string to the logger context.
+func (c Context) Hex(key string, val []byte) Context {
+	c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val)
+	return c
+}
+
+// RawJSON adds already encoded JSON to context.
+//
+// No sanity check is performed on b; it must not contain carriage returns and
+// be valid JSON.
+func (c Context) RawJSON(key string, b []byte) Context {
+	c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b)
+	return c
+}
+
+// AnErr adds the field key with serialized err to the logger context.
+func (c Context) AnErr(key string, err error) Context {
+	marshaled := ErrorMarshalFunc(err)
+	switch m := marshaled.(type) {
+	case nil:
+		return c
+	case LogObjectMarshaler:
+		return c.Object(key, m)
+	case error:
+		return c.Str(key, m.Error())
+	case string:
+		return c.Str(key, m)
+	default:
+		return c.Interface(key, m)
+	}
+}
+
+// Errs adds the field key with errs as an array of serialized errors to the
+// logger context.
+func (c Context) Errs(key string, errs []error) Context {
+	arr := Arr()
+	for _, err := range errs {
+		marshaled := ErrorMarshalFunc(err)
+		switch m := marshaled.(type) {
+		case LogObjectMarshaler:
+			arr = arr.Object(m)
+		case error:
+			arr = arr.Str(m.Error())
+		case string:
+			arr = arr.Str(m)
+		default:
+			arr = arr.Interface(m)
+		}
+	}
+
+	return c.Array(key, arr)
+}
+
+// Err adds the field "error" with serialized err to the logger context.
+func (c Context) Err(err error) Context {
+	return c.AnErr(ErrorFieldName, err)
+}
+
+// Bool adds the field key with val as a bool to the logger context.
+func (c Context) Bool(key string, b bool) Context {
+	c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b)
+	return c
+}
+
+// Bools adds the field key with val as a []bool to the logger context.
+func (c Context) Bools(key string, b []bool) Context {
+	c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b)
+	return c
+}
+
+// Int adds the field key with i as an int to the logger context.
+func (c Context) Int(key string, i int) Context {
+	c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Ints adds the field key with i as a []int to the logger context.
+func (c Context) Ints(key string, i []int) Context {
+	c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Int8 adds the field key with i as an int8 to the logger context.
+func (c Context) Int8(key string, i int8) Context {
+	c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Ints8 adds the field key with i as a []int8 to the logger context.
+func (c Context) Ints8(key string, i []int8) Context {
+	c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Int16 adds the field key with i as an int16 to the logger context.
+func (c Context) Int16(key string, i int16) Context {
+	c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Ints16 adds the field key with i as a []int16 to the logger context.
+func (c Context) Ints16(key string, i []int16) Context {
+	c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Int32 adds the field key with i as an int32 to the logger context.
+func (c Context) Int32(key string, i int32) Context {
+	c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Ints32 adds the field key with i as a []int32 to the logger context.
+func (c Context) Ints32(key string, i []int32) Context {
+	c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Int64 adds the field key with i as an int64 to the logger context.
+func (c Context) Int64(key string, i int64) Context {
+	c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Ints64 adds the field key with i as a []int64 to the logger context.
+func (c Context) Ints64(key string, i []int64) Context {
+	c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uint adds the field key with i as a uint to the logger context.
+func (c Context) Uint(key string, i uint) Context {
+	c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uints adds the field key with i as a []uint to the logger context.
+func (c Context) Uints(key string, i []uint) Context {
+	c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uint8 adds the field key with i as a uint8 to the logger context.
+func (c Context) Uint8(key string, i uint8) Context {
+	c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uints8 adds the field key with i as a []uint8 to the logger context.
+func (c Context) Uints8(key string, i []uint8) Context {
+	c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uint16 adds the field key with i as a uint16 to the logger context.
+func (c Context) Uint16(key string, i uint16) Context {
+	c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uints16 adds the field key with i as a []uint16 to the logger context.
+func (c Context) Uints16(key string, i []uint16) Context {
+	c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uint32 adds the field key with i as a uint32 to the logger context.
+func (c Context) Uint32(key string, i uint32) Context {
+	c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uints32 adds the field key with i as a []uint32 to the logger context.
+func (c Context) Uints32(key string, i []uint32) Context {
+	c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uint64 adds the field key with i as a uint64 to the logger context.
+func (c Context) Uint64(key string, i uint64) Context {
+	c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Uints64 adds the field key with i as a []uint64 to the logger context.
+func (c Context) Uints64(key string, i []uint64) Context {
+	c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+// Float32 adds the field key with f as a float32 to the logger context.
+func (c Context) Float32(key string, f float32) Context {
+	c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f)
+	return c
+}
+
+// Floats32 adds the field key with f as a []float32 to the logger context.
+func (c Context) Floats32(key string, f []float32) Context {
+	c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f)
+	return c
+}
+
+// Float64 adds the field key with f as a float64 to the logger context.
+func (c Context) Float64(key string, f float64) Context {
+	c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f)
+	return c
+}
+
+// Floats64 adds the field key with f as a []float64 to the logger context.
+func (c Context) Floats64(key string, f []float64) Context {
+	c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f)
+	return c
+}
+
+type timestampHook struct{}
+
+func (ts timestampHook) Run(e *Event, level Level, msg string) {
+	e.Timestamp()
+}
+
+var th = timestampHook{}
+
+// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key.
+// To customize the key name, change zerolog.TimestampFieldName.
+//
+// NOTE: It won't dedupe the "time" key if the *Context has one already.
+func (c Context) Timestamp() Context {
+	c.l = c.l.Hook(th)
+	return c
+}
+
+// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
+func (c Context) Time(key string, t time.Time) Context {
+	c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
+	return c
+}
+
+// Times adds the field key with t formatted as strings using zerolog.TimeFieldFormat.
+func (c Context) Times(key string, t []time.Time) Context {
+	c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat)
+	return c
+}
+
+// Dur adds the field key with d divided by DurationFieldUnit and stored as a float.
+func (c Context) Dur(key string, d time.Duration) Context {
+	c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
+	return c
+}
+
+// Durs adds the field key with each duration in d divided by DurationFieldUnit and stored as floats.
+func (c Context) Durs(key string, d []time.Duration) Context {
+	c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger)
+	return c
+}
+
+// Interface adds the field key with i marshaled using reflection.
+func (c Context) Interface(key string, i interface{}) Context {
+	c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i)
+	return c
+}
+
+type callerHook struct{}
+
+func (ch callerHook) Run(e *Event, level Level, msg string) {
+	// Three extra frames to skip (added by hook infra).
+	e.caller(CallerSkipFrameCount + 3)
+}
+
+var ch = callerHook{}
+
+// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
+func (c Context) Caller() Context {
+	c.l = c.l.Hook(ch)
+	return c
+}
+
+// IPAddr adds an IPv4 or IPv6 address to the context.
+func (c Context) IPAddr(key string, ip net.IP) Context {
+	c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip)
+	return c
+}
+
+// IPPrefix adds an IPv4 or IPv6 prefix (address and mask) to the context.
+func (c Context) IPPrefix(key string, pfx net.IPNet) Context {
+	c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx)
+	return c
+}
+
+// MACAddr adds a MAC address to the context.
+func (c Context) MACAddr(key string, ha net.HardwareAddr) Context {
+	c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha)
+	return c
+}
diff --git a/vendor/github.com/rs/zerolog/ctx.go b/vendor/github.com/rs/zerolog/ctx.go
new file mode 100644
index 0000000..2b7a682
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/ctx.go
@@ -0,0 +1,47 @@
+package zerolog
+
+import (
+	"context"
+)
+
+var disabledLogger *Logger
+
+func init() {
+	l := Nop()
+	disabledLogger = &l
+}
+
+type ctxKey struct{}
+
+// WithContext returns a copy of ctx with l associated. If l is already
+// stored in ctx, ctx is returned unchanged; likewise, a disabled logger
+// is not stored unless it replaces a logger already present in ctx.
+//
+// For instance, to add a field to an existing logger in the context, use this
+// notation:
+//
+//     ctx := r.Context()
+//     l := zerolog.Ctx(ctx)
+//     l.UpdateContext(func(c Context) Context {
+//         return c.Str("bar", "baz")
+//     })
+func (l *Logger) WithContext(ctx context.Context) context.Context {
+	if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok {
+		if lp == l {
+			// Do not store same logger.
+			return ctx
+		}
+	} else if l.level == Disabled {
+		// Do not store disabled logger.
+		return ctx
+	}
+	return context.WithValue(ctx, ctxKey{}, l)
+}
+
+// Ctx returns the Logger associated with the ctx. If no logger
+// is associated, a disabled logger is returned.
+func Ctx(ctx context.Context) *Logger {
+	if l, ok := ctx.Value(ctxKey{}).(*Logger); ok {
+		return l
+	}
+	return disabledLogger
+}
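+
+// Editorial note: a minimal client-side usage sketch (not part of the
+// upstream file) for carrying a logger through a context.Context:
+//
+//	logger := zerolog.New(os.Stdout).With().Str("component", "worker").Logger()
+//	ctx := logger.WithContext(context.Background())
+//	// Later, possibly in another function:
+//	zerolog.Ctx(ctx).Info().Msg("working")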
diff --git a/vendor/github.com/rs/zerolog/encoder.go b/vendor/github.com/rs/zerolog/encoder.go
new file mode 100644
index 0000000..09b24e8
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/encoder.go
@@ -0,0 +1,56 @@
+package zerolog
+
+import (
+	"net"
+	"time"
+)
+
+type encoder interface {
+	AppendArrayDelim(dst []byte) []byte
+	AppendArrayEnd(dst []byte) []byte
+	AppendArrayStart(dst []byte) []byte
+	AppendBeginMarker(dst []byte) []byte
+	AppendBool(dst []byte, val bool) []byte
+	AppendBools(dst []byte, vals []bool) []byte
+	AppendBytes(dst, s []byte) []byte
+	AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte
+	AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte
+	AppendEndMarker(dst []byte) []byte
+	AppendFloat32(dst []byte, val float32) []byte
+	AppendFloat64(dst []byte, val float64) []byte
+	AppendFloats32(dst []byte, vals []float32) []byte
+	AppendFloats64(dst []byte, vals []float64) []byte
+	AppendHex(dst, s []byte) []byte
+	AppendIPAddr(dst []byte, ip net.IP) []byte
+	AppendIPPrefix(dst []byte, pfx net.IPNet) []byte
+	AppendInt(dst []byte, val int) []byte
+	AppendInt16(dst []byte, val int16) []byte
+	AppendInt32(dst []byte, val int32) []byte
+	AppendInt64(dst []byte, val int64) []byte
+	AppendInt8(dst []byte, val int8) []byte
+	AppendInterface(dst []byte, i interface{}) []byte
+	AppendInts(dst []byte, vals []int) []byte
+	AppendInts16(dst []byte, vals []int16) []byte
+	AppendInts32(dst []byte, vals []int32) []byte
+	AppendInts64(dst []byte, vals []int64) []byte
+	AppendInts8(dst []byte, vals []int8) []byte
+	AppendKey(dst []byte, key string) []byte
+	AppendLineBreak(dst []byte) []byte
+	AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte
+	AppendNil(dst []byte) []byte
+	AppendObjectData(dst []byte, o []byte) []byte
+	AppendString(dst []byte, s string) []byte
+	AppendStrings(dst []byte, vals []string) []byte
+	AppendTime(dst []byte, t time.Time, format string) []byte
+	AppendTimes(dst []byte, vals []time.Time, format string) []byte
+	AppendUint(dst []byte, val uint) []byte
+	AppendUint16(dst []byte, val uint16) []byte
+	AppendUint32(dst []byte, val uint32) []byte
+	AppendUint64(dst []byte, val uint64) []byte
+	AppendUint8(dst []byte, val uint8) []byte
+	AppendUints(dst []byte, vals []uint) []byte
+	AppendUints16(dst []byte, vals []uint16) []byte
+	AppendUints32(dst []byte, vals []uint32) []byte
+	AppendUints64(dst []byte, vals []uint64) []byte
+	AppendUints8(dst []byte, vals []uint8) []byte
+}
diff --git a/vendor/github.com/rs/zerolog/encoder_cbor.go b/vendor/github.com/rs/zerolog/encoder_cbor.go
new file mode 100644
index 0000000..f8d3fe9
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/encoder_cbor.go
@@ -0,0 +1,35 @@
+// +build binary_log
+
+package zerolog
+
+// This file contains bindings to do binary encoding.
+
+import (
+	"github.com/rs/zerolog/internal/cbor"
+)
+
+var (
+	_ encoder = (*cbor.Encoder)(nil)
+
+	enc = cbor.Encoder{}
+)
+
+func appendJSON(dst []byte, j []byte) []byte {
+	return cbor.AppendEmbeddedJSON(dst, j)
+}
+
+// decodeIfBinaryToString converts a binary-formatted log message to a
+// JSON-formatted string log message.
+func decodeIfBinaryToString(in []byte) string {
+	return cbor.DecodeIfBinaryToString(in)
+}
+
+func decodeObjectToStr(in []byte) string {
+	return cbor.DecodeObjectToStr(in)
+}
+
+// decodeIfBinaryToBytes converts a binary-formatted log message to a
+// JSON-formatted byte slice.
+func decodeIfBinaryToBytes(in []byte) []byte {
+	return cbor.DecodeIfBinaryToBytes(in)
+}
diff --git a/vendor/github.com/rs/zerolog/encoder_json.go b/vendor/github.com/rs/zerolog/encoder_json.go
new file mode 100644
index 0000000..fe580f5
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/encoder_json.go
@@ -0,0 +1,32 @@
+// +build !binary_log
+
+package zerolog
+
+// This file contains bindings to generate a JSON-encoded byte stream.
+
+import (
+	"github.com/rs/zerolog/internal/json"
+)
+
+var (
+	_ encoder = (*json.Encoder)(nil)
+
+	enc = json.Encoder{}
+)
+
+func appendJSON(dst []byte, j []byte) []byte {
+	return append(dst, j...)
+}
+
+func decodeIfBinaryToString(in []byte) string {
+	return string(in)
+}
+
+func decodeObjectToStr(in []byte) string {
+	return string(in)
+}
+
+func decodeIfBinaryToBytes(in []byte) []byte {
+	return in
+}
diff --git a/vendor/github.com/rs/zerolog/event.go b/vendor/github.com/rs/zerolog/event.go
new file mode 100644
index 0000000..a0e090a
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/event.go
@@ -0,0 +1,677 @@
+package zerolog
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"runtime"
+	"strconv"
+	"sync"
+	"time"
+)
+
+var eventPool = &sync.Pool{
+	New: func() interface{} {
+		return &Event{
+			buf: make([]byte, 0, 500),
+		}
+	},
+}
+
+// ErrorMarshalFunc allows customization of global error marshaling
+var ErrorMarshalFunc = func(err error) interface{} {
+	return err
+}
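+
+// For example, to render errors with %+v formatting (a sketch; assumes the
+// wrapped errors carry extra context, e.g. from github.com/pkg/errors):
+//
+//     zerolog.ErrorMarshalFunc = func(err error) interface{} {
+//         return fmt.Sprintf("%+v", err)
+//     }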
+
+// Event represents a log event. It is instantiated by one of the level
+// methods of Logger and finalized by the Msg or Msgf method.
+type Event struct {
+	buf   []byte
+	w     LevelWriter
+	level Level
+	done  func(msg string)
+	ch    []Hook // hooks from context
+}
+
+func putEvent(e *Event) {
+	// Proper usage of a sync.Pool requires each entry to have approximately
+	// the same memory cost. To obtain this property when the stored type
+	// contains a variably-sized buffer, we add a hard limit on the maximum
+	// buffer size to place back in the pool.
+	//
+	// See https://golang.org/issue/23199
+	const maxSize = 1 << 16 // 64KiB
+	if cap(e.buf) > maxSize {
+		return
+	}
+	eventPool.Put(e)
+}
+
+// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface
+// to be implemented by types used with Event/Context's Object methods.
+type LogObjectMarshaler interface {
+	MarshalZerologObject(e *Event)
+}
+
+// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface
+// to be implemented by types used with Event/Context's Array methods.
+type LogArrayMarshaler interface {
+	MarshalZerologArray(a *Array)
+}
+
+func newEvent(w LevelWriter, level Level) *Event {
+	e := eventPool.Get().(*Event)
+	e.buf = e.buf[:0]
+	e.ch = nil
+	e.buf = enc.AppendBeginMarker(e.buf)
+	e.w = w
+	e.level = level
+	return e
+}
+
+func (e *Event) write() (err error) {
+	if e == nil {
+		return nil
+	}
+	if e.level != Disabled {
+		e.buf = enc.AppendEndMarker(e.buf)
+		e.buf = enc.AppendLineBreak(e.buf)
+		if e.w != nil {
+			_, err = e.w.WriteLevel(e.level, e.buf)
+		}
+	}
+	putEvent(e)
+	return
+}
+
+// Enabled returns false if the *Event is going to be filtered out by
+// log level or sampling.
+func (e *Event) Enabled() bool {
+	return e != nil && e.level != Disabled
+}
+
+// Discard disables the event so Msg(f) won't print it.
+func (e *Event) Discard() *Event {
+	if e == nil {
+		return e
+	}
+	e.level = Disabled
+	return nil
+}
+
+// Msg sends the *Event with msg added as the message field if not empty.
+//
+// NOTICE: once this method is called, the *Event should be disposed.
+// Calling Msg twice can have unexpected results.
+func (e *Event) Msg(msg string) {
+	if e == nil {
+		return
+	}
+	e.msg(msg)
+}
+
+// Msgf sends the event with formatted msg added as the message field if not empty.
+//
+// NOTICE: once this method is called, the *Event should be disposed.
+// Calling Msgf twice can have unexpected results.
+func (e *Event) Msgf(format string, v ...interface{}) {
+	if e == nil {
+		return
+	}
+	e.msg(fmt.Sprintf(format, v...))
+}
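+
+// Typical fluent usage (a sketch; log is any zerolog.Logger):
+//
+//     log.Info().Str("user", "alice").Int("attempt", 3).Msg("login ok")
+//     log.Error().Err(err).Msgf("retry %d failed", 3)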
+
+func (e *Event) msg(msg string) {
+	if len(e.ch) > 0 {
+		e.ch[0].Run(e, e.level, msg)
+		if len(e.ch) > 1 {
+			for _, hook := range e.ch[1:] {
+				hook.Run(e, e.level, msg)
+			}
+		}
+	}
+	if msg != "" {
+		e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg)
+	}
+	if e.done != nil {
+		defer e.done(msg)
+	}
+	if err := e.write(); err != nil {
+		if ErrorHandler != nil {
+			ErrorHandler(err)
+		} else {
+			fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err)
+		}
+	}
+}
+
+// Fields is a helper function that sets a batch of fields from a map,
+// dispatching on each value's dynamic type.
+func (e *Event) Fields(fields map[string]interface{}) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = appendFields(e.buf, fields)
+	return e
+}
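+
+// Fields usage sketch (values are dispatched by the type switch in fields.go):
+//
+//     log.Info().Fields(map[string]interface{}{
+//         "listen": ":8080",
+//         "pid":    os.Getpid(),
+//     }).Msg("started")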
+
+// Dict adds the field key with a dict to the event context.
+// Use zerolog.Dict() to create the dictionary.
+func (e *Event) Dict(key string, dict *Event) *Event {
+	if e == nil {
+		return e
+	}
+	dict.buf = enc.AppendEndMarker(dict.buf)
+	e.buf = append(enc.AppendKey(e.buf, key), dict.buf...)
+	putEvent(dict)
+	return e
+}
+
+// Dict creates an Event to be used with the *Event.Dict method.
+// Call the usual field methods like Str, Int etc. to add fields to this
+// event, then pass it as the argument to the *Event.Dict method.
+func Dict() *Event {
+	return newEvent(nil, 0)
+}
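+
+// Dict usage sketch:
+//
+//     log.Info().
+//         Dict("req", zerolog.Dict().Str("method", "GET").Int("status", 200)).
+//         Msg("handled")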
+
+// Array adds the field key with an array to the event context.
+// Use zerolog.Arr() to create the array or pass a type that
+// implements the LogArrayMarshaler interface.
+func (e *Event) Array(key string, arr LogArrayMarshaler) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendKey(e.buf, key)
+	var a *Array
+	if aa, ok := arr.(*Array); ok {
+		a = aa
+	} else {
+		a = Arr()
+		arr.MarshalZerologArray(a)
+	}
+	e.buf = a.write(e.buf)
+	return e
+}
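+
+// Array usage sketch, using the zerolog.Arr helper (defined in array.go):
+//
+//     log.Info().Array("ports", zerolog.Arr().Int(80).Int(443)).Msg("listening")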
+
+func (e *Event) appendObject(obj LogObjectMarshaler) {
+	e.buf = enc.AppendBeginMarker(e.buf)
+	obj.MarshalZerologObject(e)
+	e.buf = enc.AppendEndMarker(e.buf)
+}
+
+// Object marshals an object that implements the LogObjectMarshaler interface.
+func (e *Event) Object(key string, obj LogObjectMarshaler) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendKey(e.buf, key)
+	e.appendObject(obj)
+	return e
+}
+
+// EmbedObject marshals an object that implements the LogObjectMarshaler
+// interface, adding its fields directly to the event without a wrapping key.
+func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event {
+	if e == nil {
+		return e
+	}
+	obj.MarshalZerologObject(e)
+	return e
+}
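+
+// A minimal LogObjectMarshaler sketch (the User type is illustrative):
+//
+//     type User struct {
+//         Name string
+//         Age  int
+//     }
+//
+//     func (u User) MarshalZerologObject(e *zerolog.Event) {
+//         e.Str("name", u.Name).Int("age", u.Age)
+//     }
+//
+//     log.Info().Object("user", u).Msg("created") // {"user":{"name":...,"age":...}}
+//     log.Info().EmbedObject(u).Msg("created")    // fields inlined, no "user" key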
+
+// Str adds the field key with val as a string to the *Event context.
+func (e *Event) Str(key, val string) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val)
+	return e
+}
+
+// Strs adds the field key with vals as a []string to the *Event context.
+func (e *Event) Strs(key string, vals []string) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals)
+	return e
+}
+
+// Bytes adds the field key with val as a string to the *Event context.
+//
+// Runes outside of normal ASCII ranges will be hex-encoded in the resulting
+// JSON.
+func (e *Event) Bytes(key string, val []byte) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val)
+	return e
+}
+
+// Hex adds the field key with val as a hex string to the *Event context.
+func (e *Event) Hex(key string, val []byte) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val)
+	return e
+}
+
+// RawJSON adds already encoded JSON to the log line under key.
+//
+// No sanity check is performed on b; it must not contain carriage returns
+// and must be valid JSON.
+func (e *Event) RawJSON(key string, b []byte) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = appendJSON(enc.AppendKey(e.buf, key), b)
+	return e
+}
+
+// AnErr adds the field key with serialized err to the *Event context.
+// If err is nil, no field is added.
+func (e *Event) AnErr(key string, err error) *Event {
+	marshaled := ErrorMarshalFunc(err)
+	switch m := marshaled.(type) {
+	case nil:
+		return e
+	case LogObjectMarshaler:
+		return e.Object(key, m)
+	case error:
+		return e.Str(key, m.Error())
+	case string:
+		return e.Str(key, m)
+	default:
+		return e.Interface(key, m)
+	}
+}
+
+// Errs adds the field key with errs as an array of serialized errors to the
+// *Event context.
+func (e *Event) Errs(key string, errs []error) *Event {
+	if e == nil {
+		return e
+	}
+
+	arr := Arr()
+	for _, err := range errs {
+		marshaled := ErrorMarshalFunc(err)
+		switch m := marshaled.(type) {
+		case LogObjectMarshaler:
+			arr = arr.Object(m)
+		case error:
+			arr = arr.Err(m)
+		case string:
+			arr = arr.Str(m)
+		default:
+			arr = arr.Interface(m)
+		}
+	}
+
+	return e.Array(key, arr)
+}
+
+// Err adds the field "error" with serialized err to the *Event context.
+// If err is nil, no field is added.
+// To customize the key name, change zerolog.ErrorFieldName.
+func (e *Event) Err(err error) *Event {
+	return e.AnErr(ErrorFieldName, err)
+}
+
+// Bool adds the field key with val as a bool to the *Event context.
+func (e *Event) Bool(key string, b bool) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b)
+	return e
+}
+
+// Bools adds the field key with val as a []bool to the *Event context.
+func (e *Event) Bools(key string, b []bool) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b)
+	return e
+}
+
+// Int adds the field key with i as an int to the *Event context.
+func (e *Event) Int(key string, i int) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Ints adds the field key with i as a []int to the *Event context.
+func (e *Event) Ints(key string, i []int) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Int8 adds the field key with i as an int8 to the *Event context.
+func (e *Event) Int8(key string, i int8) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Ints8 adds the field key with i as a []int8 to the *Event context.
+func (e *Event) Ints8(key string, i []int8) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Int16 adds the field key with i as an int16 to the *Event context.
+func (e *Event) Int16(key string, i int16) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Ints16 adds the field key with i as a []int16 to the *Event context.
+func (e *Event) Ints16(key string, i []int16) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Int32 adds the field key with i as an int32 to the *Event context.
+func (e *Event) Int32(key string, i int32) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Ints32 adds the field key with i as a []int32 to the *Event context.
+func (e *Event) Ints32(key string, i []int32) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Int64 adds the field key with i as an int64 to the *Event context.
+func (e *Event) Int64(key string, i int64) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Ints64 adds the field key with i as a []int64 to the *Event context.
+func (e *Event) Ints64(key string, i []int64) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uint adds the field key with i as a uint to the *Event context.
+func (e *Event) Uint(key string, i uint) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uints adds the field key with i as a []uint to the *Event context.
+func (e *Event) Uints(key string, i []uint) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uint8 adds the field key with i as a uint8 to the *Event context.
+func (e *Event) Uint8(key string, i uint8) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uints8 adds the field key with i as a []uint8 to the *Event context.
+func (e *Event) Uints8(key string, i []uint8) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uint16 adds the field key with i as a uint16 to the *Event context.
+func (e *Event) Uint16(key string, i uint16) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uints16 adds the field key with i as a []uint16 to the *Event context.
+func (e *Event) Uints16(key string, i []uint16) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uint32 adds the field key with i as a uint32 to the *Event context.
+func (e *Event) Uint32(key string, i uint32) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uints32 adds the field key with i as a []uint32 to the *Event context.
+func (e *Event) Uints32(key string, i []uint32) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uint64 adds the field key with i as a uint64 to the *Event context.
+func (e *Event) Uint64(key string, i uint64) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Uints64 adds the field key with i as a []uint64 to the *Event context.
+func (e *Event) Uints64(key string, i []uint64) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Float32 adds the field key with f as a float32 to the *Event context.
+func (e *Event) Float32(key string, f float32) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f)
+	return e
+}
+
+// Floats32 adds the field key with f as a []float32 to the *Event context.
+func (e *Event) Floats32(key string, f []float32) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f)
+	return e
+}
+
+// Float64 adds the field key with f as a float64 to the *Event context.
+func (e *Event) Float64(key string, f float64) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f)
+	return e
+}
+
+// Floats64 adds the field key with f as a []float64 to the *Event context.
+func (e *Event) Floats64(key string, f []float64) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f)
+	return e
+}
+
+// Timestamp adds the current local time as a UNIX timestamp to the *Event context with the "time" key.
+// To customize the key name, change zerolog.TimestampFieldName.
+//
+// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one
+// already.
+func (e *Event) Timestamp() *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat)
+	return e
+}
+
+// Time adds the field key with t formatted as a string using zerolog.TimeFieldFormat.
+func (e *Event) Time(key string, t time.Time) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
+	return e
+}
+
+// Times adds the field key with t formatted as an array of strings using zerolog.TimeFieldFormat.
+func (e *Event) Times(key string, t []time.Time) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat)
+	return e
+}
+
+// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit.
+// If zerolog.DurationFieldInteger is true, durations are rendered as integers
+// instead of floats.
+func (e *Event) Dur(key string, d time.Duration) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
+	return e
+}
+
+// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit.
+// If zerolog.DurationFieldInteger is true, durations are rendered as integers
+// instead of floats.
+func (e *Event) Durs(key string, d []time.Duration) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
+	return e
+}
+
+// TimeDiff adds the field key with the positive duration between time t and start.
+// If time t is not greater than start, the duration will be 0.
+// Duration format follows the same principle as Dur().
+func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event {
+	if e == nil {
+		return e
+	}
+	var d time.Duration
+	if t.After(start) {
+		d = t.Sub(start)
+	}
+	e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger)
+	return e
+}
+
+// Interface adds the field key with i marshaled using reflection.
+func (e *Event) Interface(key string, i interface{}) *Event {
+	if e == nil {
+		return e
+	}
+	if obj, ok := i.(LogObjectMarshaler); ok {
+		return e.Object(key, obj)
+	}
+	e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i)
+	return e
+}
+
+// Caller adds the file:line of the caller with the zerolog.CallerFieldName key.
+func (e *Event) Caller() *Event {
+	return e.caller(CallerSkipFrameCount)
+}
+
+func (e *Event) caller(skip int) *Event {
+	if e == nil {
+		return e
+	}
+	_, file, line, ok := runtime.Caller(skip)
+	if !ok {
+		return e
+	}
+	e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), file+":"+strconv.Itoa(line))
+	return e
+}
+
+// IPAddr adds an IPv4 or IPv6 address to the event.
+func (e *Event) IPAddr(key string, ip net.IP) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip)
+	return e
+}
+
+// IPPrefix adds an IPv4 or IPv6 prefix (address and mask) to the event.
+func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx)
+	return e
+}
+
+// MACAddr adds a MAC address to the event.
+func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event {
+	if e == nil {
+		return e
+	}
+	e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha)
+	return e
+}
diff --git a/vendor/github.com/rs/zerolog/fields.go b/vendor/github.com/rs/zerolog/fields.go
new file mode 100644
index 0000000..6b62ecc
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/fields.go
@@ -0,0 +1,242 @@
+package zerolog
+
+import (
+	"net"
+	"sort"
+	"time"
+)
+
+func appendFields(dst []byte, fields map[string]interface{}) []byte {
+	keys := make([]string, 0, len(fields))
+	for key := range fields {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		dst = enc.AppendKey(dst, key)
+		val := fields[key]
+		if val, ok := val.(LogObjectMarshaler); ok {
+			e := newEvent(nil, 0)
+			e.buf = e.buf[:0]
+			e.appendObject(val)
+			dst = append(dst, e.buf...)
+			putEvent(e)
+			continue
+		}
+		switch val := val.(type) {
+		case string:
+			dst = enc.AppendString(dst, val)
+		case []byte:
+			dst = enc.AppendBytes(dst, val)
+		case error:
+			marshaled := ErrorMarshalFunc(val)
+			switch m := marshaled.(type) {
+			case LogObjectMarshaler:
+				e := newEvent(nil, 0)
+				e.buf = e.buf[:0]
+				e.appendObject(m)
+				dst = append(dst, e.buf...)
+				putEvent(e)
+			case error:
+				dst = enc.AppendString(dst, m.Error())
+			case string:
+				dst = enc.AppendString(dst, m)
+			default:
+				dst = enc.AppendInterface(dst, m)
+			}
+		case []error:
+			dst = enc.AppendArrayStart(dst)
+			for i, err := range val {
+				marshaled := ErrorMarshalFunc(err)
+				switch m := marshaled.(type) {
+				case LogObjectMarshaler:
+					e := newEvent(nil, 0)
+					e.buf = e.buf[:0]
+					e.appendObject(m)
+					dst = append(dst, e.buf...)
+					putEvent(e)
+				case error:
+					dst = enc.AppendString(dst, m.Error())
+				case string:
+					dst = enc.AppendString(dst, m)
+				default:
+					dst = enc.AppendInterface(dst, m)
+				}
+
+				if i < (len(val) - 1) {
+					dst = enc.AppendArrayDelim(dst)
+				}
+			}
+			dst = enc.AppendArrayEnd(dst)
+		case bool:
+			dst = enc.AppendBool(dst, val)
+		case int:
+			dst = enc.AppendInt(dst, val)
+		case int8:
+			dst = enc.AppendInt8(dst, val)
+		case int16:
+			dst = enc.AppendInt16(dst, val)
+		case int32:
+			dst = enc.AppendInt32(dst, val)
+		case int64:
+			dst = enc.AppendInt64(dst, val)
+		case uint:
+			dst = enc.AppendUint(dst, val)
+		case uint8:
+			dst = enc.AppendUint8(dst, val)
+		case uint16:
+			dst = enc.AppendUint16(dst, val)
+		case uint32:
+			dst = enc.AppendUint32(dst, val)
+		case uint64:
+			dst = enc.AppendUint64(dst, val)
+		case float32:
+			dst = enc.AppendFloat32(dst, val)
+		case float64:
+			dst = enc.AppendFloat64(dst, val)
+		case time.Time:
+			dst = enc.AppendTime(dst, val, TimeFieldFormat)
+		case time.Duration:
+			dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger)
+		case *string:
+			if val != nil {
+				dst = enc.AppendString(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *bool:
+			if val != nil {
+				dst = enc.AppendBool(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *int:
+			if val != nil {
+				dst = enc.AppendInt(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *int8:
+			if val != nil {
+				dst = enc.AppendInt8(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *int16:
+			if val != nil {
+				dst = enc.AppendInt16(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *int32:
+			if val != nil {
+				dst = enc.AppendInt32(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *int64:
+			if val != nil {
+				dst = enc.AppendInt64(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *uint:
+			if val != nil {
+				dst = enc.AppendUint(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *uint8:
+			if val != nil {
+				dst = enc.AppendUint8(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *uint16:
+			if val != nil {
+				dst = enc.AppendUint16(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *uint32:
+			if val != nil {
+				dst = enc.AppendUint32(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *uint64:
+			if val != nil {
+				dst = enc.AppendUint64(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *float32:
+			if val != nil {
+				dst = enc.AppendFloat32(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *float64:
+			if val != nil {
+				dst = enc.AppendFloat64(dst, *val)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *time.Time:
+			if val != nil {
+				dst = enc.AppendTime(dst, *val, TimeFieldFormat)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case *time.Duration:
+			if val != nil {
+				dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger)
+			} else {
+				dst = enc.AppendNil(dst)
+			}
+		case []string:
+			dst = enc.AppendStrings(dst, val)
+		case []bool:
+			dst = enc.AppendBools(dst, val)
+		case []int:
+			dst = enc.AppendInts(dst, val)
+		case []int8:
+			dst = enc.AppendInts8(dst, val)
+		case []int16:
+			dst = enc.AppendInts16(dst, val)
+		case []int32:
+			dst = enc.AppendInts32(dst, val)
+		case []int64:
+			dst = enc.AppendInts64(dst, val)
+		case []uint:
+			dst = enc.AppendUints(dst, val)
+		// case []uint8:
+		// 	dst = enc.AppendUints8(dst, val)
+		case []uint16:
+			dst = enc.AppendUints16(dst, val)
+		case []uint32:
+			dst = enc.AppendUints32(dst, val)
+		case []uint64:
+			dst = enc.AppendUints64(dst, val)
+		case []float32:
+			dst = enc.AppendFloats32(dst, val)
+		case []float64:
+			dst = enc.AppendFloats64(dst, val)
+		case []time.Time:
+			dst = enc.AppendTimes(dst, val, TimeFieldFormat)
+		case []time.Duration:
+			dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger)
+		case nil:
+			dst = enc.AppendNil(dst)
+		case net.IP:
+			dst = enc.AppendIPAddr(dst, val)
+		case net.IPNet:
+			dst = enc.AppendIPPrefix(dst, val)
+		case net.HardwareAddr:
+			dst = enc.AppendMACAddr(dst, val)
+		default:
+			dst = enc.AppendInterface(dst, val)
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/rs/zerolog/globals.go b/vendor/github.com/rs/zerolog/globals.go
new file mode 100644
index 0000000..1c66904
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/globals.go
@@ -0,0 +1,76 @@
+package zerolog
+
+import "time"
+import "sync/atomic"
+
+var (
+	// TimestampFieldName is the field name used for the timestamp field.
+	TimestampFieldName = "time"
+
+	// LevelFieldName is the field name used for the level field.
+	LevelFieldName = "level"
+
+	// MessageFieldName is the field name used for the message field.
+	MessageFieldName = "message"
+
+	// ErrorFieldName is the field name used for error fields.
+	ErrorFieldName = "error"
+
+	// CallerFieldName is the field name used for caller field.
+	CallerFieldName = "caller"
+
+	// CallerSkipFrameCount is the number of stack frames to skip to find the caller.
+	CallerSkipFrameCount = 2
+
+	// TimeFieldFormat defines the time format of the Time field type.
+// If set to an empty string, the time is formatted as a UNIX timestamp
+// (integer).
+	TimeFieldFormat = time.RFC3339
+
+	// TimestampFunc defines the function called to generate a timestamp.
+	TimestampFunc = time.Now
+
+	// DurationFieldUnit defines the unit for time.Duration type fields added
+	// using the Dur method.
+	DurationFieldUnit = time.Millisecond
+
+	// DurationFieldInteger renders Dur fields as integer instead of float if
+	// set to true.
+	DurationFieldInteger = false
+
+	// ErrorHandler is called whenever zerolog fails to write an event on its
+// output. If not set, an error is printed to stderr. This handler must
+	// be thread safe and non-blocking.
+	ErrorHandler func(err error)
+)
+
+var (
+	gLevel          = new(uint32)
+	disableSampling = new(uint32)
+)
+
+// SetGlobalLevel sets the global override for the log level. If this
+// value is raised, all Loggers will use at least this value.
+//
+// To globally disable logs, set GlobalLevel to Disabled.
+func SetGlobalLevel(l Level) {
+	atomic.StoreUint32(gLevel, uint32(l))
+}
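+
+// For example, to silence all info and debug output process-wide (sketch):
+//
+//     zerolog.SetGlobalLevel(zerolog.WarnLevel)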
+
+// GlobalLevel returns the current global log level.
+func GlobalLevel() Level {
+	return Level(atomic.LoadUint32(gLevel))
+}
+
+// DisableSampling disables sampling in all Loggers if v is true.
+func DisableSampling(v bool) {
+	var i uint32
+	if v {
+		i = 1
+	}
+	atomic.StoreUint32(disableSampling, i)
+}
+
+func samplingDisabled() bool {
+	return atomic.LoadUint32(disableSampling) == 1
+}
diff --git a/vendor/github.com/rs/zerolog/go.mod b/vendor/github.com/rs/zerolog/go.mod
new file mode 100644
index 0000000..ed79427
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/go.mod
@@ -0,0 +1 @@
+module github.com/rs/zerolog
diff --git a/vendor/github.com/rs/zerolog/hook.go b/vendor/github.com/rs/zerolog/hook.go
new file mode 100644
index 0000000..08133ac
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/hook.go
@@ -0,0 +1,60 @@
+package zerolog
+
+// Hook defines an interface to a log hook.
+type Hook interface {
+	// Run runs the hook with the event.
+	Run(e *Event, level Level, message string)
+}
+
+// HookFunc is an adaptor to allow the use of an ordinary function
+// as a Hook.
+type HookFunc func(e *Event, level Level, message string)
+
+// Run implements the Hook interface.
+func (h HookFunc) Run(e *Event, level Level, message string) {
+	h(e, level, message)
+}
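+
+// HookFunc sketch: stamp every event with its level name (assumes Level.String
+// and Logger.Hook from log.go):
+//
+//     log := zerolog.New(os.Stdout).Hook(zerolog.HookFunc(
+//         func(e *zerolog.Event, level zerolog.Level, msg string) {
+//             e.Str("severity", level.String())
+//         }))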
+
+// LevelHook applies a different hook for each level.
+type LevelHook struct {
+	NoLevelHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook
+}
+
+// Run implements the Hook interface.
+func (h LevelHook) Run(e *Event, level Level, message string) {
+	switch level {
+	case DebugLevel:
+		if h.DebugHook != nil {
+			h.DebugHook.Run(e, level, message)
+		}
+	case InfoLevel:
+		if h.InfoHook != nil {
+			h.InfoHook.Run(e, level, message)
+		}
+	case WarnLevel:
+		if h.WarnHook != nil {
+			h.WarnHook.Run(e, level, message)
+		}
+	case ErrorLevel:
+		if h.ErrorHook != nil {
+			h.ErrorHook.Run(e, level, message)
+		}
+	case FatalLevel:
+		if h.FatalHook != nil {
+			h.FatalHook.Run(e, level, message)
+		}
+	case PanicLevel:
+		if h.PanicHook != nil {
+			h.PanicHook.Run(e, level, message)
+		}
+	case NoLevel:
+		if h.NoLevelHook != nil {
+			h.NoLevelHook.Run(e, level, message)
+		}
+	}
+}
+
+// NewLevelHook returns a new LevelHook.
+func NewLevelHook() LevelHook {
+	return LevelHook{}
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/README.md b/vendor/github.com/rs/zerolog/internal/cbor/README.md
new file mode 100644
index 0000000..92c2e8c
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/README.md
@@ -0,0 +1,56 @@
+## Reference
+
+CBOR encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049).
+
+## Comparison of JSON vs CBOR
+
+Two main areas of reduction are:
+
+1. CPU usage when writing a log message.
+2. Size (in bytes) of log messages.
+
+
+CPU Usage savings are below:
+```
+name                                    JSON time/op    CBOR time/op   delta
+Info-32                                   15.3ns ± 1%    11.7ns ± 3%  -23.78%  (p=0.000 n=9+10)      
+ContextFields-32                          16.2ns ± 2%    12.3ns ± 3%  -23.97%  (p=0.000 n=9+9)       
+ContextAppend-32                          6.70ns ± 0%    6.20ns ± 0%   -7.44%  (p=0.000 n=9+9)       
+LogFields-32                              66.4ns ± 0%    24.6ns ± 2%  -62.89%  (p=0.000 n=10+9)      
+LogArrayObject-32                          911ns ±11%     768ns ± 6%  -15.64%  (p=0.000 n=10+10)     
+LogFieldType/Floats-32                    70.3ns ± 2%    29.5ns ± 1%  -57.98%  (p=0.000 n=10+10)     
+LogFieldType/Err-32                       14.0ns ± 3%    12.1ns ± 8%  -13.20%  (p=0.000 n=8+10)      
+LogFieldType/Dur-32                       17.2ns ± 2%    13.1ns ± 1%  -24.27%  (p=0.000 n=10+9)      
+LogFieldType/Object-32                    54.3ns ±11%    52.3ns ± 7%     ~     (p=0.239 n=10+10)     
+LogFieldType/Ints-32                      20.3ns ± 2%    15.1ns ± 2%  -25.50%  (p=0.000 n=9+10)      
+LogFieldType/Interfaces-32                 642ns ±11%     621ns ± 9%     ~     (p=0.118 n=10+10)     
+LogFieldType/Interface(Objects)-32         635ns ±13%     632ns ± 9%     ~     (p=0.592 n=10+10)     
+LogFieldType/Times-32                      294ns ± 0%      27ns ± 1%  -90.71%  (p=0.000 n=10+9)      
+LogFieldType/Durs-32                       121ns ± 0%      33ns ± 2%  -72.44%  (p=0.000 n=9+9)       
+LogFieldType/Interface(Object)-32         56.6ns ± 8%    52.3ns ± 8%   -7.54%  (p=0.007 n=10+10)     
+LogFieldType/Errs-32                      17.8ns ± 3%    16.1ns ± 2%   -9.71%  (p=0.000 n=10+9)      
+LogFieldType/Time-32                      40.5ns ± 1%    12.7ns ± 6%  -68.66%  (p=0.000 n=8+9)       
+LogFieldType/Bool-32                      12.0ns ± 5%    10.2ns ± 2%  -15.18%  (p=0.000 n=10+8)      
+LogFieldType/Bools-32                     17.2ns ± 2%    12.6ns ± 4%  -26.63%  (p=0.000 n=10+10)     
+LogFieldType/Int-32                       12.3ns ± 2%    11.2ns ± 4%   -9.27%  (p=0.000 n=9+10)      
+LogFieldType/Float-32                     16.7ns ± 1%    12.6ns ± 2%  -24.42%  (p=0.000 n=7+9)       
+LogFieldType/Str-32                       12.7ns ± 7%    11.3ns ± 7%  -10.88%  (p=0.000 n=10+9)      
+LogFieldType/Strs-32                      20.3ns ± 3%    18.2ns ± 3%  -10.25%  (p=0.000 n=9+10)      
+LogFieldType/Interface-32                  183ns ±12%     175ns ± 9%     ~     (p=0.078 n=10+10)     
+```
+
+Log message size savings depend greatly on the number and type of fields in the log message.
+Assume this log message (with an integer, a timestamp and a string, in addition to the level):
+
+`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}`
+
+Two measurements were taken of the log file sizes: one without any compression, the second
+using [compress/zlib](https://golang.org/pkg/compress/zlib/).
+
+Results for 10,000 log messages:
+
+| Log Format |  Plain File Size (in KB) | Compressed File Size (in KB) |
+| :--- | :---: | :---: |
+| JSON | 920 | 28 |
+| CBOR | 550 | 28 |
+
+The example used to calculate the above data is available in [Examples](examples).
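+
+Note: zerolog selects the CBOR encoder at build time via the `binary_log`
+build tag (see `encoder_cbor.go`); the default build uses the JSON encoder.
+For example, `go build -tags binary_log` switches a program to binary output.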
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/base.go b/vendor/github.com/rs/zerolog/internal/cbor/base.go
new file mode 100644
index 0000000..58cd082
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/base.go
@@ -0,0 +1,11 @@
+package cbor
+
+type Encoder struct{}
+
+// AppendKey adds a key (string) to the binary encoded log message
+func (e Encoder) AppendKey(dst []byte, key string) []byte {
+	if len(dst) < 1 {
+		dst = e.AppendBeginMarker(dst)
+	}
+	return e.AppendString(dst, key)
+}
\ No newline at end of file
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
new file mode 100644
index 0000000..969f591
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go
@@ -0,0 +1,100 @@
+// Package cbor provides primitives for storing different data
+// in the CBOR (binary) format. CBOR is defined in RFC7049.
+package cbor
+
+import "time"
+
+const (
+	majorOffset   = 5
+	additionalMax = 23
+
+	// Non Values.
+	additionalTypeBoolFalse byte = 20
+	additionalTypeBoolTrue  byte = 21
+	additionalTypeNull      byte = 22
+
+	// Integer (+ve and -ve) Sub-types.
+	additionalTypeIntUint8  byte = 24
+	additionalTypeIntUint16 byte = 25
+	additionalTypeIntUint32 byte = 26
+	additionalTypeIntUint64 byte = 27
+
+	// Float Sub-types.
+	additionalTypeFloat16 byte = 25
+	additionalTypeFloat32 byte = 26
+	additionalTypeFloat64 byte = 27
+	additionalTypeBreak   byte = 31
+
+	// Tag Sub-types.
+	additionalTypeTimestamp byte = 1
+
+	// Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml
+	additionalTypeTagNetworkAddr   uint16 = 260
+	additionalTypeTagNetworkPrefix uint16 = 261
+	additionalTypeEmbeddedJSON     uint16 = 262
+	additionalTypeTagHexString     uint16 = 263
+
+	// Unspecified number of elements.
+	additionalTypeInfiniteCount byte = 31
+)
+const (
+	majorTypeUnsignedInt    byte = iota << majorOffset // Major type 0
+	majorTypeNegativeInt                               // Major type 1
+	majorTypeByteString                                // Major type 2
+	majorTypeUtf8String                                // Major type 3
+	majorTypeArray                                     // Major type 4
+	majorTypeMap                                       // Major type 5
+	majorTypeTags                                      // Major type 6
+	majorTypeSimpleAndFloat                            // Major type 7
+)
+
+const (
+	maskOutAdditionalType byte = (7 << majorOffset)
+	maskOutMajorType      byte = 31
+)
+
+const (
+	float32Nan         = "\xfa\x7f\xc0\x00\x00"
+	float32PosInfinity = "\xfa\x7f\x80\x00\x00"
+	float32NegInfinity = "\xfa\xff\x80\x00\x00"
+	float64Nan         = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"
+	float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"
+	float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"
+)
+
+// IntegerTimeFieldFormat indicates the format of a timestamp decoded
+// from an integer (time in seconds).
+var IntegerTimeFieldFormat = time.RFC3339
+
+// NanoTimeFieldFormat indicates the format of a timestamp decoded
+// from a float value (time in seconds and nanoseconds).
+var NanoTimeFieldFormat = time.RFC3339Nano
+
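+// appendCborTypePrefix writes the initial byte (the major type ORed with an
+// additional-type marker) followed by the value in 1, 2, 4 or 8 big-endian
+// bytes. For example, the header of a 300-byte UTF-8 string is encoded as
+// 0x79 0x01 0x2c: majorTypeUtf8String|additionalTypeIntUint16, then 300.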
+func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte {
+	byteCount := 8
+	var minor byte
+	switch {
+	case number < 256:
+		byteCount = 1
+		minor = additionalTypeIntUint8
+
+	case number < 65536:
+		byteCount = 2
+		minor = additionalTypeIntUint16
+
+	case number < 4294967296:
+		byteCount = 4
+		minor = additionalTypeIntUint32
+
+	default:
+		byteCount = 8
+		minor = additionalTypeIntUint64
+
+	}
+	dst = append(dst, byte(major|minor))
+	byteCount--
+	for ; byteCount >= 0; byteCount-- {
+		dst = append(dst, byte(number>>(uint(byteCount)*8)))
+	}
+	return dst
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go
new file mode 100644
index 0000000..e3cf3b7
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go
@@ -0,0 +1,614 @@
+package cbor
+
+// This file contains code to decode a stream of CBOR Data into JSON.
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+var decodeTimeZone *time.Location
+
+const hexTable = "0123456789abcdef"
+
+const isFloat32 = 4
+const isFloat64 = 8
+
+func readNBytes(src *bufio.Reader, n int) []byte {
+	ret := make([]byte, n)
+	for i := 0; i < n; i++ {
+		ch, e := src.ReadByte()
+		if e != nil {
+			panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n))
+		}
+		ret[i] = ch
+	}
+	return ret
+}
+
+func readByte(src *bufio.Reader) byte {
+	b, e := src.ReadByte()
+	if e != nil {
+		panic(fmt.Errorf("Tried to Read 1 Byte.. But hit end of file"))
+	}
+	return b
+}
+
+func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 {
+	val := int64(0)
+	if minor <= 23 {
+		val = int64(minor)
+	} else {
+		bytesToRead := 0
+		switch minor {
+		case additionalTypeIntUint8:
+			bytesToRead = 1
+		case additionalTypeIntUint16:
+			bytesToRead = 2
+		case additionalTypeIntUint32:
+			bytesToRead = 4
+		case additionalTypeIntUint64:
+			bytesToRead = 8
+		default:
+			panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor))
+		}
+		pb := readNBytes(src, bytesToRead)
+		for i := 0; i < bytesToRead; i++ {
+			val = val * 256
+			val += int64(pb[i])
+		}
+	}
+	return val
+}
+
+func decodeInteger(src *bufio.Reader) int64 {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeUnsignedInt && major != majorTypeNegativeInt {
+		panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major))
+	}
+	val := decodeIntAdditonalType(src, minor)
+	if major == 0 {
+		return val
+	}
+	return (-1 - val)
+}
+
+func decodeFloat(src *bufio.Reader) (float64, int) {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeSimpleAndFloat {
+		panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major))
+	}
+
+	switch minor {
+	case additionalTypeFloat16:
+		panic(fmt.Errorf("float16 is not suppported in decodeFloat"))
+
+	case additionalTypeFloat32:
+		pb := readNBytes(src, 4)
+		switch string(pb) {
+		case float32Nan:
+			return math.NaN(), isFloat32
+		case float32PosInfinity:
+			return math.Inf(0), isFloat32
+		case float32NegInfinity:
+			return math.Inf(-1), isFloat32
+		}
+		n := uint32(0)
+		for i := 0; i < 4; i++ {
+			n = n * 256
+			n += uint32(pb[i])
+		}
+		val := math.Float32frombits(n)
+		return float64(val), isFloat32
+	case additionalTypeFloat64:
+		pb := readNBytes(src, 8)
+		switch string(pb) {
+		case float64Nan:
+			return math.NaN(), isFloat64
+		case float64PosInfinity:
+			return math.Inf(0), isFloat64
+		case float64NegInfinity:
+			return math.Inf(-1), isFloat64
+		}
+		n := uint64(0)
+		for i := 0; i < 8; i++ {
+			n = n * 256
+			n += uint64(pb[i])
+		}
+		val := math.Float64frombits(n)
+		return val, isFloat64
+	}
+	panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor))
+}
+
+func decodeStringComplex(dst []byte, s string, pos uint) []byte {
+	i := int(pos)
+	start := 0
+
+	for i < len(s) {
+		b := s[i]
+		if b >= utf8.RuneSelf {
+			r, size := utf8.DecodeRuneInString(s[i:])
+			if r == utf8.RuneError && size == 1 {
+				// In case of error, first append previous simple characters to
+				// the byte slice if any and append a replacement character code
+				// in place of the invalid sequence.
+				if start < i {
+					dst = append(dst, s[start:i]...)
+				}
+				dst = append(dst, `\ufffd`...)
+				i += size
+				start = i
+				continue
+			}
+			i += size
+			continue
+		}
+		if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' {
+			i++
+			continue
+		}
+		// We encountered a character that needs to be encoded.
+		// Let's append the previous simple characters to the byte slice
+		// and switch our operation to read and encode the remainder
+		// characters byte-by-byte.
+		if start < i {
+			dst = append(dst, s[start:i]...)
+		}
+		switch b {
+		case '"', '\\':
+			dst = append(dst, '\\', b)
+		case '\b':
+			dst = append(dst, '\\', 'b')
+		case '\f':
+			dst = append(dst, '\\', 'f')
+		case '\n':
+			dst = append(dst, '\\', 'n')
+		case '\r':
+			dst = append(dst, '\\', 'r')
+		case '\t':
+			dst = append(dst, '\\', 't')
+		default:
+			dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF])
+		}
+		i++
+		start = i
+	}
+	if start < len(s) {
+		dst = append(dst, s[start:]...)
+	}
+	return dst
+}
+
+func decodeString(src *bufio.Reader, noQuotes bool) []byte {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeByteString {
+		panic(fmt.Errorf("Major type is: %d in decodeString", major))
+	}
+	result := []byte{}
+	if !noQuotes {
+		result = append(result, '"')
+	}
+	length := decodeIntAdditonalType(src, minor)
+	len := int(length)
+	pbs := readNBytes(src, len)
+	result = append(result, pbs...)
+	if noQuotes {
+		return result
+	}
+	return append(result, '"')
+}
+
+func decodeUTF8String(src *bufio.Reader) []byte {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeUtf8String {
+		panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major))
+	}
+	result := []byte{'"'}
+	length := decodeIntAdditonalType(src, minor)
+	len := int(length)
+	pbs := readNBytes(src, len)
+
+	for i := 0; i < len; i++ {
+		// Check if the character needs encoding. Control characters, slashes,
+		// and the double quote need JSON encoding. Bytes above the ASCII
+		// boundary need UTF-8 encoding.
+		if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' {
+			// We encountered a character that needs to be encoded. Switch
+			// to complex version of the algorithm.
+			dst := []byte{'"'}
+			dst = decodeStringComplex(dst, string(pbs), uint(i))
+			return append(dst, '"')
+		}
+	}
+	// The string needs no encoding and is therefore directly
+	// appended to the byte slice.
+	result = append(result, pbs...)
+	return append(result, '"')
+}
+
+func array2Json(src *bufio.Reader, dst io.Writer) {
+	dst.Write([]byte{'['})
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeArray {
+		panic(fmt.Errorf("Major type is: %d in array2Json", major))
+	}
+	len := 0
+	unSpecifiedCount := false
+	if minor == additionalTypeInfiniteCount {
+		unSpecifiedCount = true
+	} else {
+		length := decodeIntAdditonalType(src, minor)
+		len = int(length)
+	}
+	for i := 0; unSpecifiedCount || i < len; i++ {
+		if unSpecifiedCount {
+			pb, e := src.Peek(1)
+			if e != nil {
+				panic(e)
+			}
+			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+				readByte(src)
+				break
+			}
+		}
+		cbor2JsonOneObject(src, dst)
+		if unSpecifiedCount {
+			pb, e := src.Peek(1)
+			if e != nil {
+				panic(e)
+			}
+			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+				readByte(src)
+				break
+			}
+			dst.Write([]byte{','})
+		} else if i+1 < len {
+			dst.Write([]byte{','})
+		}
+	}
+	dst.Write([]byte{']'})
+}
+
+func map2Json(src *bufio.Reader, dst io.Writer) {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeMap {
+		panic(fmt.Errorf("Major type is: %d in map2Json", major))
+	}
+	len := 0
+	unSpecifiedCount := false
+	if minor == additionalTypeInfiniteCount {
+		unSpecifiedCount = true
+	} else {
+		length := decodeIntAdditonalType(src, minor)
+		len = int(length)
+	}
+	dst.Write([]byte{'{'})
+	for i := 0; unSpecifiedCount || i < len; i++ {
+		if unSpecifiedCount {
+			pb, e := src.Peek(1)
+			if e != nil {
+				panic(e)
+			}
+			if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+				readByte(src)
+				break
+			}
+		}
+		cbor2JsonOneObject(src, dst)
+		if i%2 == 0 {
+			// Even position values are keys.
+			dst.Write([]byte{':'})
+		} else {
+			if unSpecifiedCount {
+				pb, e := src.Peek(1)
+				if e != nil {
+					panic(e)
+				}
+				if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) {
+					readByte(src)
+					break
+				}
+				dst.Write([]byte{','})
+			} else if i+1 < len {
+				dst.Write([]byte{','})
+			}
+		}
+	}
+	dst.Write([]byte{'}'})
+}
+
+func decodeTagData(src *bufio.Reader) []byte {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeTags {
+		panic(fmt.Errorf("Major type is: %d in decodeTagData", major))
+	}
+	switch minor {
+	case additionalTypeTimestamp:
+		return decodeTimeStamp(src)
+
+	// Tag value is larger than 255 (so encoded as uint16).
+	case additionalTypeIntUint16:
+		val := decodeIntAdditonalType(src, minor)
+
+		switch uint16(val) {
+		case additionalTypeEmbeddedJSON:
+			pb := readByte(src)
+			dataMajor := pb & maskOutAdditionalType
+			if dataMajor != majorTypeByteString {
+				panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor))
+			}
+			src.UnreadByte()
+			return decodeString(src, true)
+
+		case additionalTypeTagNetworkAddr:
+			octets := decodeString(src, true)
+			ss := []byte{'"'}
+			switch len(octets) {
+			case 6: // MAC address.
+				ha := net.HardwareAddr(octets)
+				ss = append(append(ss, ha.String()...), '"')
+			case 4: // IPv4 address.
+				fallthrough
+			case 16: // IPv6 address.
+				ip := net.IP(octets)
+				ss = append(append(ss, ip.String()...), '"')
+			default:
+				panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets)))
+			}
+			return ss
+
+		case additionalTypeTagNetworkPrefix:
+			pb := readByte(src)
+			if pb != byte(majorTypeMap|0x1) {
+				panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected"))
+			}
+			octets := decodeString(src, true)
+			val := decodeInteger(src)
+			ip := net.IP(octets)
+			var mask net.IPMask
+			pfxLen := int(val)
+			if len(octets) == 4 {
+				mask = net.CIDRMask(pfxLen, 32)
+			} else {
+				mask = net.CIDRMask(pfxLen, 128)
+			}
+			ipPfx := net.IPNet{IP: ip, Mask: mask}
+			ss := []byte{'"'}
+			ss = append(append(ss, ipPfx.String()...), '"')
+			return ss
+
+		case additionalTypeTagHexString:
+			octets := decodeString(src, true)
+			ss := []byte{'"'}
+			for _, v := range octets {
+				ss = append(ss, hexTable[v>>4], hexTable[v&0x0f])
+			}
+			return append(ss, '"')
+
+		default:
+			panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val))
+		}
+	}
+	panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor))
+}
+
+func decodeTimeStamp(src *bufio.Reader) []byte {
+	pb := readByte(src)
+	src.UnreadByte()
+	tsMajor := pb & maskOutAdditionalType
+	if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt {
+		n := decodeInteger(src)
+		t := time.Unix(n, 0)
+		if decodeTimeZone != nil {
+			t = t.In(decodeTimeZone)
+		} else {
+			t = t.In(time.UTC)
+		}
+		tsb := []byte{}
+		tsb = append(tsb, '"')
+		tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat)
+		tsb = append(tsb, '"')
+		return tsb
+	} else if tsMajor == majorTypeSimpleAndFloat {
+		n, _ := decodeFloat(src)
+		secs := int64(n)
+		n -= float64(secs)
+		n *= float64(1e9)
+		t := time.Unix(secs, int64(n))
+		if decodeTimeZone != nil {
+			t = t.In(decodeTimeZone)
+		} else {
+			t = t.In(time.UTC)
+		}
+		tsb := []byte{}
+		tsb = append(tsb, '"')
+		tsb = t.AppendFormat(tsb, NanoTimeFieldFormat)
+		tsb = append(tsb, '"')
+		return tsb
+	}
+	panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor))
+}
+
+func decodeSimpleFloat(src *bufio.Reader) []byte {
+	pb := readByte(src)
+	major := pb & maskOutAdditionalType
+	minor := pb & maskOutMajorType
+	if major != majorTypeSimpleAndFloat {
+		panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major))
+	}
+	switch minor {
+	case additionalTypeBoolTrue:
+		return []byte("true")
+	case additionalTypeBoolFalse:
+		return []byte("false")
+	case additionalTypeNull:
+		return []byte("null")
+	case additionalTypeFloat16:
+		fallthrough
+	case additionalTypeFloat32:
+		fallthrough
+	case additionalTypeFloat64:
+		src.UnreadByte()
+		v, bc := decodeFloat(src)
+		ba := []byte{}
+		switch {
+		case math.IsNaN(v):
+			return []byte("\"NaN\"")
+		case math.IsInf(v, 1):
+			return []byte("\"+Inf\"")
+		case math.IsInf(v, -1):
+			return []byte("\"-Inf\"")
+		}
+		if bc == isFloat32 {
+			ba = strconv.AppendFloat(ba, v, 'f', -1, 32)
+		} else if bc == isFloat64 {
+			ba = strconv.AppendFloat(ba, v, 'f', -1, 64)
+		} else {
+			panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc))
+		}
+		return ba
+	default:
+		panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor))
+	}
+}
+
+func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) {
+	pb, e := src.Peek(1)
+	if e != nil {
+		panic(e)
+	}
+	major := (pb[0] & maskOutAdditionalType)
+
+	switch major {
+	case majorTypeUnsignedInt:
+		fallthrough
+	case majorTypeNegativeInt:
+		n := decodeInteger(src)
+		dst.Write([]byte(strconv.Itoa(int(n))))
+
+	case majorTypeByteString:
+		s := decodeString(src, false)
+		dst.Write(s)
+
+	case majorTypeUtf8String:
+		s := decodeUTF8String(src)
+		dst.Write(s)
+
+	case majorTypeArray:
+		array2Json(src, dst)
+
+	case majorTypeMap:
+		map2Json(src, dst)
+
+	case majorTypeTags:
+		s := decodeTagData(src)
+		dst.Write(s)
+
+	case majorTypeSimpleAndFloat:
+		s := decodeSimpleFloat(src)
+		dst.Write(s)
+	}
+}
+
+func moreBytesToRead(src *bufio.Reader) bool {
+	_, e := src.ReadByte()
+	if e == nil {
+		src.UnreadByte()
+		return true
+	}
+	return false
+}
+
+// Cbor2JsonManyObjects decodes all the CBOR objects read from the src
+// reader. It keeps decoding until the reader returns EOF (an error while
+// reading). The decoded string is written to dst, and a newline is written
+// to the output stream after every CBOR object.
+//
+// Returns the error (if any) encountered during decoding.
+// The helper functions panic when an error is encountered; this function
+// recovers non-runtime errors and returns the cause as an error.
+func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = r.(error)
+		}
+	}()
+	bufRdr := bufio.NewReader(src)
+	for moreBytesToRead(bufRdr) {
+		cbor2JsonOneObject(bufRdr, dst)
+		dst.Write([]byte("\n"))
+	}
+	return nil
+}
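+
+// Usage sketch (data is any CBOR-encoded log stream):
+//
+//     var out bytes.Buffer
+//     if err := Cbor2JsonManyObjects(bytes.NewReader(data), &out); err != nil {
+//         // handle malformed input
+//     }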
+
+// binaryFmt detects whether the bytes to be printed are in binary (CBOR) format.
+func binaryFmt(p []byte) bool {
+	return len(p) > 0 && p[0] > 0x7F
+}
+
+func getReader(str string) *bufio.Reader {
+	return bufio.NewReader(strings.NewReader(str))
+}
+
+// DecodeIfBinaryToString converts a binary-formatted log message to a
+// JSON-formatted string log message, suitable for printing to console/syslog.
+func DecodeIfBinaryToString(in []byte) string {
+	if binaryFmt(in) {
+		var b bytes.Buffer
+		Cbor2JsonManyObjects(strings.NewReader(string(in)), &b)
+		return b.String()
+	}
+	return string(in)
+}
+
+// DecodeObjectToStr checks if the input is in binary format; if so,
+// it decodes a single object and returns the decoded string.
+func DecodeObjectToStr(in []byte) string {
+	if binaryFmt(in) {
+		var b bytes.Buffer
+		cbor2JsonOneObject(getReader(string(in)), &b)
+		return b.String()
+	}
+	return string(in)
+}
+
+// DecodeIfBinaryToBytes checks if the input is in binary format; if so,
+// it decodes all objects and returns the result as a byte slice.
+func DecodeIfBinaryToBytes(in []byte) []byte {
+	if binaryFmt(in) {
+		var b bytes.Buffer
+		Cbor2JsonManyObjects(bytes.NewReader(in), &b)
+		return b.Bytes()
+	}
+	return in
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/string.go b/vendor/github.com/rs/zerolog/internal/cbor/string.go
new file mode 100644
index 0000000..ff42afa
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/string.go
@@ -0,0 +1,68 @@
+package cbor
+
+// AppendStrings encodes and adds an array of strings to the dst byte array.
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendString(dst, v)
+	}
+	return dst
+}
+
+// AppendString encodes and adds a string to the dst byte array.
+func (Encoder) AppendString(dst []byte, s string) []byte {
+	major := majorTypeUtf8String
+
+	l := len(s)
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l))
+	}
+	return append(dst, s...)
+}
+
+// AppendBytes encodes and adds an array of bytes to the dst byte array.
+func (Encoder) AppendBytes(dst, s []byte) []byte {
+	major := majorTypeByteString
+
+	l := len(s)
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	return append(dst, s...)
+}
+
+// AppendEmbeddedJSON adds a tag and embeds the input JSON as-is.
+func AppendEmbeddedJSON(dst, s []byte) []byte {
+	major := majorTypeTags
+	minor := additionalTypeEmbeddedJSON
+
+	// Append the TAG to indicate this is Embedded JSON.
+	dst = append(dst, byte(major|additionalTypeIntUint16))
+	dst = append(dst, byte(minor>>8))
+	dst = append(dst, byte(minor&0xff))
+
+	// Append the JSON Object as Byte String.
+	major = majorTypeByteString
+
+	l := len(s)
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	return append(dst, s...)
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/time.go b/vendor/github.com/rs/zerolog/internal/cbor/time.go
new file mode 100644
index 0000000..12f6a1d
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/time.go
@@ -0,0 +1,93 @@
+package cbor
+
+import (
+	"time"
+)
+
+func appendIntegerTimestamp(dst []byte, t time.Time) []byte {
+	major := majorTypeTags
+	minor := additionalTypeTimestamp
+	dst = append(dst, byte(major|minor))
+	secs := t.Unix()
+	var val uint64
+	if secs < 0 {
+		major = majorTypeNegativeInt
+		val = uint64(-secs - 1)
+	} else {
+		major = majorTypeUnsignedInt
+		val = uint64(secs)
+	}
+	dst = appendCborTypePrefix(dst, major, uint64(val))
+	return dst
+}
+
+func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte {
+	major := majorTypeTags
+	minor := additionalTypeTimestamp
+	dst = append(dst, byte(major|minor))
+	secs := t.Unix()
+	nanos := t.Nanosecond()
+	val := float64(secs) + float64(nanos)*1e-9
+	return e.AppendFloat64(dst, val)
+}
+
+// AppendTime encodes and adds a timestamp to the dst byte array.
+func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte {
+	utc := t.UTC()
+	if utc.Nanosecond() == 0 {
+		return appendIntegerTimestamp(dst, utc)
+	}
+	return e.appendFloatTimestamp(dst, utc)
+}
+
+// AppendTimes encodes and adds an array of timestamps to the dst byte array.
+func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+
+	for _, t := range vals {
+		dst = e.AppendTime(dst, t, unused)
+	}
+	return dst
+}
+
+// AppendDuration encodes and adds a duration to the dst byte array.
+// The useInt field indicates whether to store the duration as seconds (integer) or
+// as seconds+nanoseconds (float).
+func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
+	if useInt {
+		return e.AppendInt64(dst, int64(d/unit))
+	}
+	return e.AppendFloat64(dst, float64(d)/float64(unit))
+}
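+
+// A usage sketch (illustrative, not part of the upstream file): with unit
+// set to time.Second, 1500ms is stored either as the integer 1 or as the
+// float 1.5.
+//
+//     var enc Encoder
+//     enc.AppendDuration(nil, 1500*time.Millisecond, time.Second, true)  // 0x01
+//     enc.AppendDuration(nil, 1500*time.Millisecond, time.Second, false) // 0xfb + float64 bits of 1.5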
+
+// AppendDurations encodes and adds an array of durations to the dst byte array.
+// The useInt field indicates whether to store the duration as seconds (integer) or
+// as seconds+nanoseconds (float).
+func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, d := range vals {
+		dst = e.AppendDuration(dst, d, unit, useInt)
+	}
+	return dst
+}
diff --git a/vendor/github.com/rs/zerolog/internal/cbor/types.go b/vendor/github.com/rs/zerolog/internal/cbor/types.go
new file mode 100644
index 0000000..eb4f697
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/cbor/types.go
@@ -0,0 +1,478 @@
+package cbor
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net"
+)
+
+// AppendNil inserts a 'Nil' object into the dst byte array.
+func (Encoder) AppendNil(dst []byte) []byte {
+	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull))
+}
+
+// AppendBeginMarker inserts a map start into the dst byte array.
+func (Encoder) AppendBeginMarker(dst []byte) []byte {
+	return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount))
+}
+
+// AppendEndMarker inserts a map end into the dst byte array.
+func (Encoder) AppendEndMarker(dst []byte) []byte {
+	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
+}
+
+// AppendObjectData takes an object in the form of a byte array and appends it to dst.
+func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
+	// o already starts with a BeginMarker, which must not be duplicated
+	// when appending to existing data.
+	return append(dst, o[1:]...)
+}
+
+// AppendArrayStart adds markers to indicate the start of an array.
+func (Encoder) AppendArrayStart(dst []byte) []byte {
+	return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount))
+}
+
+// AppendArrayEnd adds markers to indicate the end of an array.
+func (Encoder) AppendArrayEnd(dst []byte) []byte {
+	return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak))
+}
+
+// AppendArrayDelim adds markers to indicate the end of a particular array element.
+func (Encoder) AppendArrayDelim(dst []byte) []byte {
+	// No delimiters are needed in cbor.
+	return dst
+}
+
+// AppendLineBreak is a no-op that keeps API compatibility with the json encoder.
+func (Encoder) AppendLineBreak(dst []byte) []byte {
+	// No line breaks needed in binary format.
+	return dst
+}
+
+// AppendBool encodes and inserts a boolean value into the dst byte array.
+func (Encoder) AppendBool(dst []byte, val bool) []byte {
+	b := additionalTypeBoolFalse
+	if val {
+		b = additionalTypeBoolTrue
+	}
+	return append(dst, byte(majorTypeSimpleAndFloat|b))
+}
+
+// AppendBools encodes and inserts an array of boolean values into the dst byte array.
+func (e Encoder) AppendBools(dst []byte, vals []bool) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendBool(dst, v)
+	}
+	return dst
+}
+
+// AppendInt encodes and inserts an integer value into the dst byte array.
+func (Encoder) AppendInt(dst []byte, val int) []byte {
+	major := majorTypeUnsignedInt
+	contentVal := val
+	if val < 0 {
+		major = majorTypeNegativeInt
+		contentVal = -val - 1
+	}
+	if contentVal <= additionalMax {
+		lb := byte(contentVal)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(contentVal))
+	}
+	return dst
+}
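+
+// A usage sketch (illustrative, not part of the upstream file): a negative
+// value is stored as major type 1 with content value -val-1, so -500
+// becomes 499 in the two-byte uint16 prefix form.
+//
+//     var enc Encoder
+//     buf := enc.AppendInt(nil, -500)
+//     // buf == []byte{0x39, 0x01, 0xf3}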
+
+// AppendInts encodes and inserts an array of integer values into the dst byte array.
+func (e Encoder) AppendInts(dst []byte, vals []int) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendInt(dst, v)
+	}
+	return dst
+}
+
+// AppendInt8 encodes and inserts an int8 value into the dst byte array.
+func (e Encoder) AppendInt8(dst []byte, val int8) []byte {
+	return e.AppendInt(dst, int(val))
+}
+
+// AppendInts8 encodes and inserts an array of int8 values into the dst byte array.
+func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendInt(dst, int(v))
+	}
+	return dst
+}
+
+// AppendInt16 encodes and inserts an int16 value into the dst byte array.
+func (e Encoder) AppendInt16(dst []byte, val int16) []byte {
+	return e.AppendInt(dst, int(val))
+}
+
+// AppendInts16 encodes and inserts an array of int16 values into the dst byte array.
+func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendInt(dst, int(v))
+	}
+	return dst
+}
+
+// AppendInt32 encodes and inserts an int32 value into the dst byte array.
+func (e Encoder) AppendInt32(dst []byte, val int32) []byte {
+	return e.AppendInt(dst, int(val))
+}
+
+// AppendInts32 encodes and inserts an array of int32 values into the dst byte array.
+func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendInt(dst, int(v))
+	}
+	return dst
+}
+
+// AppendInt64 encodes and inserts an int64 value into the dst byte array.
+func (Encoder) AppendInt64(dst []byte, val int64) []byte {
+	major := majorTypeUnsignedInt
+	contentVal := val
+	if val < 0 {
+		major = majorTypeNegativeInt
+		contentVal = -val - 1
+	}
+	if contentVal <= additionalMax {
+		lb := byte(contentVal)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(contentVal))
+	}
+	return dst
+}
+
+// AppendInts64 encodes and inserts an array of int64 values into the dst byte array.
+func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendInt64(dst, v)
+	}
+	return dst
+}
+
+// AppendUint encodes and inserts an unsigned integer value into the dst byte array.
+func (e Encoder) AppendUint(dst []byte, val uint) []byte {
+	return e.AppendInt64(dst, int64(val))
+}
+
+// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array.
+func (e Encoder) AppendUints(dst []byte, vals []uint) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendUint(dst, v)
+	}
+	return dst
+}
+
+// AppendUint8 encodes and inserts an unsigned int8 value into the dst byte array.
+func (e Encoder) AppendUint8(dst []byte, val uint8) []byte {
+	return e.AppendUint(dst, uint(val))
+}
+
+// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array.
+func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendUint8(dst, v)
+	}
+	return dst
+}
+
+// AppendUint16 encodes and inserts a uint16 value into the dst byte array.
+func (e Encoder) AppendUint16(dst []byte, val uint16) []byte {
+	return e.AppendUint(dst, uint(val))
+}
+
+// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array.
+func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendUint16(dst, v)
+	}
+	return dst
+}
+
+// AppendUint32 encodes and inserts a uint32 value into the dst byte array.
+func (e Encoder) AppendUint32(dst []byte, val uint32) []byte {
+	return e.AppendUint(dst, uint(val))
+}
+
+// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array.
+func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendUint32(dst, v)
+	}
+	return dst
+}
+
+// AppendUint64 encodes and inserts a uint64 value into the dst byte array.
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
+	major := majorTypeUnsignedInt
+	contentVal := val
+	if contentVal <= additionalMax {
+		lb := byte(contentVal)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, contentVal)
+	}
+	return dst
+}
+
+// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array.
+func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendUint64(dst, v)
+	}
+	return dst
+}
+
+// AppendFloat32 encodes and inserts a single precision float value into the dst byte array.
+func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
+	switch {
+	case math.IsNaN(float64(val)):
+		return append(dst, "\xfa\x7f\xc0\x00\x00"...)
+	case math.IsInf(float64(val), 1):
+		return append(dst, "\xfa\x7f\x80\x00\x00"...)
+	case math.IsInf(float64(val), -1):
+		return append(dst, "\xfa\xff\x80\x00\x00"...)
+	}
+	major := majorTypeSimpleAndFloat
+	subType := additionalTypeFloat32
+	n := math.Float32bits(val)
+	var buf [4]byte
+	for i := uint(0); i < 4; i++ {
+		buf[i] = byte(n >> ((3 - i) * 8))
+	}
+	return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3])
+}
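+
+// A usage sketch (illustrative, not part of the upstream file): a finite
+// value is emitted as the float32 header byte followed by the IEEE 754
+// bits in big-endian order.
+//
+//     var enc Encoder
+//     buf := enc.AppendFloat32(nil, 1.5)
+//     // buf == []byte{0xfa, 0x3f, 0xc0, 0x00, 0x00}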
+
+// AppendFloats32 encodes and inserts an array of single precision float values into the dst byte array.
+func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendFloat32(dst, v)
+	}
+	return dst
+}
+
+// AppendFloat64 encodes and inserts a double precision float value into the dst byte array.
+func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
+	switch {
+	case math.IsNaN(val):
+		return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...)
+	case math.IsInf(val, 1):
+		return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...)
+	case math.IsInf(val, -1):
+		return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...)
+	}
+	major := majorTypeSimpleAndFloat
+	subType := additionalTypeFloat64
+	n := math.Float64bits(val)
+	dst = append(dst, byte(major|subType))
+	for i := uint(1); i <= 8; i++ {
+		b := byte(n >> ((8 - i) * 8))
+		dst = append(dst, b)
+	}
+	return dst
+}
+
+// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array.
+func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
+	major := majorTypeArray
+	l := len(vals)
+	if l == 0 {
+		return e.AppendArrayEnd(e.AppendArrayStart(dst))
+	}
+	if l <= additionalMax {
+		lb := byte(l)
+		dst = append(dst, byte(major|lb))
+	} else {
+		dst = appendCborTypePrefix(dst, major, uint64(l))
+	}
+	for _, v := range vals {
+		dst = e.AppendFloat64(dst, v)
+	}
+	return dst
+}
+
+// AppendInterface takes an arbitrary object, converts it to JSON, and embeds the result in dst.
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
+	marshaled, err := json.Marshal(i)
+	if err != nil {
+		return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
+	}
+	return AppendEmbeddedJSON(dst, marshaled)
+}
+
+// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6).
+func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
+	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+	dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
+	dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
+	return e.AppendBytes(dst, ip)
+}
+
+// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length).
+func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
+	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+	dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8))
+	dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff))
+
+	// Prefix is a tuple (aka MAP of 1 pair of elements) -
+	// first element is prefix, second is mask length.
+	dst = append(dst, byte(majorTypeMap|0x1))
+	dst = e.AppendBytes(dst, pfx.IP)
+	maskLen, _ := pfx.Mask.Size()
+	return e.AppendUint8(dst, uint8(maskLen))
+}
+
+// AppendMACAddr encodes and inserts a hardware (MAC) address.
+func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
+	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+	dst = append(dst, byte(additionalTypeTagNetworkAddr>>8))
+	dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff))
+	return e.AppendBytes(dst, ha)
+}
+
+// AppendHex adds a TAG and inserts the hex bytes as a string.
+func (e Encoder) AppendHex(dst []byte, val []byte) []byte {
+	dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16))
+	dst = append(dst, byte(additionalTypeTagHexString>>8))
+	dst = append(dst, byte(additionalTypeTagHexString&0xff))
+	return e.AppendBytes(dst, val)
+}
diff --git a/vendor/github.com/rs/zerolog/internal/json/base.go b/vendor/github.com/rs/zerolog/internal/json/base.go
new file mode 100644
index 0000000..d6f8839
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/base.go
@@ -0,0 +1,12 @@
+package json
+
+type Encoder struct{}
+
+// AppendKey appends a new key to the output JSON.
+func (e Encoder) AppendKey(dst []byte, key string) []byte {
+	if len(dst) > 1 && dst[len(dst)-1] != '{' {
+		dst = append(dst, ',')
+	}
+	dst = e.AppendString(dst, key)
+	return append(dst, ':')
+}
\ No newline at end of file
diff --git a/vendor/github.com/rs/zerolog/internal/json/bytes.go b/vendor/github.com/rs/zerolog/internal/json/bytes.go
new file mode 100644
index 0000000..de64120
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/bytes.go
@@ -0,0 +1,85 @@
+package json
+
+import "unicode/utf8"
+
+// AppendBytes is a mirror of AppendString with a []byte arg.
+func (Encoder) AppendBytes(dst, s []byte) []byte {
+	dst = append(dst, '"')
+	for i := 0; i < len(s); i++ {
+		if !noEscapeTable[s[i]] {
+			dst = appendBytesComplex(dst, s, i)
+			return append(dst, '"')
+		}
+	}
+	dst = append(dst, s...)
+	return append(dst, '"')
+}
+
+// AppendHex encodes the input bytes to a hex string and appends
+// the encoded string to the input byte slice.
+//
+// The operation loops through each byte and encodes it as hex using
+// the hex lookup table.
+func (Encoder) AppendHex(dst, s []byte) []byte {
+	dst = append(dst, '"')
+	for _, v := range s {
+		dst = append(dst, hex[v>>4], hex[v&0x0f])
+	}
+	return append(dst, '"')
+}
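+
+// A usage sketch (illustrative, not part of the upstream file):
+//
+//     var enc Encoder
+//     buf := enc.AppendHex(nil, []byte{0xbe, 0xef})
+//     // string(buf) == `"beef"`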
+
+// appendBytesComplex is a mirror of appendStringComplex
+// with a []byte arg.
+func appendBytesComplex(dst, s []byte, i int) []byte {
+	start := 0
+	for i < len(s) {
+		b := s[i]
+		if b >= utf8.RuneSelf {
+			r, size := utf8.DecodeRune(s[i:])
+			if r == utf8.RuneError && size == 1 {
+				if start < i {
+					dst = append(dst, s[start:i]...)
+				}
+				dst = append(dst, `\ufffd`...)
+				i += size
+				start = i
+				continue
+			}
+			i += size
+			continue
+		}
+		if noEscapeTable[b] {
+			i++
+			continue
+		}
+		// We encountered a character that needs to be encoded.
+		// Let's append the previous simple characters to the byte slice
+		// and switch our operation to read and encode the remainder
+		// characters byte-by-byte.
+		if start < i {
+			dst = append(dst, s[start:i]...)
+		}
+		switch b {
+		case '"', '\\':
+			dst = append(dst, '\\', b)
+		case '\b':
+			dst = append(dst, '\\', 'b')
+		case '\f':
+			dst = append(dst, '\\', 'f')
+		case '\n':
+			dst = append(dst, '\\', 'n')
+		case '\r':
+			dst = append(dst, '\\', 'r')
+		case '\t':
+			dst = append(dst, '\\', 't')
+		default:
+			dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
+		}
+		i++
+		start = i
+	}
+	if start < len(s) {
+		dst = append(dst, s[start:]...)
+	}
+	return dst
+}
diff --git a/vendor/github.com/rs/zerolog/internal/json/string.go b/vendor/github.com/rs/zerolog/internal/json/string.go
new file mode 100644
index 0000000..815906f
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/string.go
@@ -0,0 +1,121 @@
+package json
+
+import "unicode/utf8"
+
+const hex = "0123456789abcdef"
+
+var noEscapeTable = [256]bool{}
+
+func init() {
+	for i := 0; i <= 0x7e; i++ {
+		noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"'
+	}
+}
+
+// AppendStrings encodes the input strings to json and
+// appends the encoded string list to the input byte slice.
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = e.AppendString(dst, vals[0])
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = e.AppendString(append(dst, ','), val)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendString encodes the input string to json and appends
+// the encoded string to the input byte slice.
+//
+// The operation loops through each byte in the string looking
+// for characters that need json or utf8 encoding. If the string
+// does not need encoding, it is appended in its
+// entirety to the byte slice.
+// If we encounter a byte that does need encoding, we switch to
+// a byte-by-byte read-encode-append for the remainder.
+func (Encoder) AppendString(dst []byte, s string) []byte {
+	// Start with a double quote.
+	dst = append(dst, '"')
+	// Loop through each character in the string.
+	for i := 0; i < len(s); i++ {
+		// Check if the character needs encoding. Control characters, slashes,
+		// and the double quote need json encoding. Bytes above the ascii
+		// boundary need utf8 encoding.
+		if !noEscapeTable[s[i]] {
+			// We encountered a character that needs to be encoded. Switch
+			// to complex version of the algorithm.
+			dst = appendStringComplex(dst, s, i)
+			return append(dst, '"')
+		}
+	}
+	// The string has no need for encoding and therefore is directly
+	// appended to the byte slice.
+	dst = append(dst, s...)
+	// End with a double quote
+	return append(dst, '"')
+}
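+
+// A usage sketch (illustrative, not part of the upstream file): plain
+// strings are copied verbatim between quotes, while a double quote inside
+// the string triggers the complex path below and gets escaped.
+//
+//     var enc Encoder
+//     buf := enc.AppendString(nil, `a"b`)
+//     // string(buf) == `"a\"b"`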
+
+// appendStringComplex is used by AppendString to take over an
+// in-progress JSON string encoding that encountered a character that needs
+// to be encoded.
+func appendStringComplex(dst []byte, s string, i int) []byte {
+	start := 0
+	for i < len(s) {
+		b := s[i]
+		if b >= utf8.RuneSelf {
+			r, size := utf8.DecodeRuneInString(s[i:])
+			if r == utf8.RuneError && size == 1 {
+				// In case of error, first append previous simple characters to
+				// the byte slice if any and append a replacement character code
+				// in place of the invalid sequence.
+				if start < i {
+					dst = append(dst, s[start:i]...)
+				}
+				dst = append(dst, `\ufffd`...)
+				i += size
+				start = i
+				continue
+			}
+			i += size
+			continue
+		}
+		if noEscapeTable[b] {
+			i++
+			continue
+		}
+		// We encountered a character that needs to be encoded.
+		// Let's append the previous simple characters to the byte slice
+		// and switch our operation to read and encode the remainder
+		// characters byte-by-byte.
+		if start < i {
+			dst = append(dst, s[start:i]...)
+		}
+		switch b {
+		case '"', '\\':
+			dst = append(dst, '\\', b)
+		case '\b':
+			dst = append(dst, '\\', 'b')
+		case '\f':
+			dst = append(dst, '\\', 'f')
+		case '\n':
+			dst = append(dst, '\\', 'n')
+		case '\r':
+			dst = append(dst, '\\', 'r')
+		case '\t':
+			dst = append(dst, '\\', 't')
+		default:
+			dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF])
+		}
+		i++
+		start = i
+	}
+	if start < len(s) {
+		dst = append(dst, s[start:]...)
+	}
+	return dst
+}
diff --git a/vendor/github.com/rs/zerolog/internal/json/time.go b/vendor/github.com/rs/zerolog/internal/json/time.go
new file mode 100644
index 0000000..739afff
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/time.go
@@ -0,0 +1,76 @@
+package json
+
+import (
+	"strconv"
+	"time"
+)
+
+// AppendTime formats the input time with the given format
+// and appends the encoded string to the input byte slice.
+func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte {
+	if format == "" {
+		return e.AppendInt64(dst, t.Unix())
+	}
+	return append(t.AppendFormat(append(dst, '"'), format), '"')
+}
+
+// AppendTimes converts the input times with the given format
+// and appends the encoded string list to the input byte slice.
+func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte {
+	if format == "" {
+		return appendUnixTimes(dst, vals)
+	}
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"')
+	if len(vals) > 1 {
+		for _, t := range vals[1:] {
+			dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"')
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+func appendUnixTimes(dst []byte, vals []time.Time) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendInt(dst, vals[0].Unix(), 10)
+	if len(vals) > 1 {
+		for _, t := range vals[1:] {
+			dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendDuration formats the input duration with the given unit & format
+// and appends the encoded string to the input byte slice.
+func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte {
+	if useInt {
+		return strconv.AppendInt(dst, int64(d/unit), 10)
+	}
+	return e.AppendFloat64(dst, float64(d)/float64(unit))
+}
+
+// AppendDurations formats the input durations with the given unit & format
+// and appends the encoded string list to the input byte slice.
+func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = e.AppendDuration(dst, vals[0], unit, useInt)
+	if len(vals) > 1 {
+		for _, d := range vals[1:] {
+			dst = e.AppendDuration(append(dst, ','), d, unit, useInt)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
diff --git a/vendor/github.com/rs/zerolog/internal/json/types.go b/vendor/github.com/rs/zerolog/internal/json/types.go
new file mode 100644
index 0000000..f343c86
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/internal/json/types.go
@@ -0,0 +1,402 @@
+package json
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net"
+	"strconv"
+)
+
+// AppendNil inserts a 'Nil' object into the dst byte array.
+func (Encoder) AppendNil(dst []byte) []byte {
+	return append(dst, "null"...)
+}
+
+// AppendBeginMarker inserts a map start into the dst byte array.
+func (Encoder) AppendBeginMarker(dst []byte) []byte {
+	return append(dst, '{')
+}
+
+// AppendEndMarker inserts a map end into the dst byte array.
+func (Encoder) AppendEndMarker(dst []byte) []byte {
+	return append(dst, '}')
+}
+
+// AppendLineBreak appends a line break.
+func (Encoder) AppendLineBreak(dst []byte) []byte {
+	return append(dst, '\n')
+}
+
+// AppendArrayStart adds markers to indicate the start of an array.
+func (Encoder) AppendArrayStart(dst []byte) []byte {
+	return append(dst, '[')
+}
+
+// AppendArrayEnd adds markers to indicate the end of an array.
+func (Encoder) AppendArrayEnd(dst []byte) []byte {
+	return append(dst, ']')
+}
+
+// AppendArrayDelim adds markers to indicate the end of a particular array element.
+func (Encoder) AppendArrayDelim(dst []byte) []byte {
+	if len(dst) > 0 {
+		return append(dst, ',')
+	}
+	return dst
+}
+
+// AppendBool converts the input bool to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendBool(dst []byte, val bool) []byte {
+	return strconv.AppendBool(dst, val)
+}
+
+// AppendBools encodes the input bools to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendBools(dst []byte, vals []bool) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendBool(dst, vals[0])
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendBool(append(dst, ','), val)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInt converts the input int to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt(dst []byte, val int) []byte {
+	return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts encodes the input ints to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts(dst []byte, vals []int) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInt8 converts the input int8 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt8(dst []byte, val int8) []byte {
+	return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts8 encodes the input int8s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts8(dst []byte, vals []int8) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInt16 converts the input int16 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt16(dst []byte, val int16) []byte {
+	return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts16 encodes the input int16s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts16(dst []byte, vals []int16) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInt32 converts the input int32 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt32(dst []byte, val int32) []byte {
+	return strconv.AppendInt(dst, int64(val), 10)
+}
+
+// AppendInts32 encodes the input int32s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts32(dst []byte, vals []int32) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendInt(dst, int64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendInt(append(dst, ','), int64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInt64 converts the input int64 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendInt64(dst []byte, val int64) []byte {
+	return strconv.AppendInt(dst, val, 10)
+}
+
+// AppendInts64 encodes the input int64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendInts64(dst []byte, vals []int64) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendInt(dst, vals[0], 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendInt(append(dst, ','), val, 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendUint converts the input uint to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint(dst []byte, val uint) []byte {
+	return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints encodes the input uints to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints(dst []byte, vals []uint) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendUint8 converts the input uint8 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint8(dst []byte, val uint8) []byte {
+	return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints8 encodes the input uint8s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendUint16 converts the input uint16 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint16(dst []byte, val uint16) []byte {
+	return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints16 encodes the input uint16s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendUint32 converts the input uint32 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint32(dst []byte, val uint32) []byte {
+	return strconv.AppendUint(dst, uint64(val), 10)
+}
+
+// AppendUints32 encodes the input uint32s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendUint(dst, uint64(vals[0]), 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendUint(append(dst, ','), uint64(val), 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendUint64 converts the input uint64 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte {
+	return strconv.AppendUint(dst, val, 10)
+}
+
+// AppendUints64 encodes the input uint64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = strconv.AppendUint(dst, vals[0], 10)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = strconv.AppendUint(append(dst, ','), val, 10)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+func appendFloat(dst []byte, val float64, bitSize int) []byte {
+	// JSON does not permit NaN or Infinity. A typical JSON encoder would fail
+	// with an error, but a logging library wants the data to get through, so we
+	// make a tradeoff and store those values as strings.
+	switch {
+	case math.IsNaN(val):
+		return append(dst, `"NaN"`...)
+	case math.IsInf(val, 1):
+		return append(dst, `"+Inf"`...)
+	case math.IsInf(val, -1):
+		return append(dst, `"-Inf"`...)
+	}
+	return strconv.AppendFloat(dst, val, 'f', -1, bitSize)
+}
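+
+// A usage sketch (illustrative, not part of the upstream file):
+//
+//     var enc Encoder
+//     enc.AppendFloat64(nil, 1.5)        // `1.5`
+//     enc.AppendFloat64(nil, math.NaN()) // `"NaN"` (a string, not a JSON number)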
+
+// AppendFloat32 converts the input float32 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendFloat32(dst []byte, val float32) []byte {
+	return appendFloat(dst, float64(val), 32)
+}
+
+// AppendFloats32 encodes the input float32s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = appendFloat(dst, float64(vals[0]), 32)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = appendFloat(append(dst, ','), float64(val), 32)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendFloat64 converts the input float64 to a string and
+// appends the encoded string to the input byte slice.
+func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
+	return appendFloat(dst, val, 64)
+}
+
+// AppendFloats64 encodes the input float64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	dst = appendFloat(dst, vals[0], 64)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = appendFloat(append(dst, ','), val, 64)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInterface marshals the input interface to a string and
+// appends the encoded string to the input byte slice.
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
+	marshaled, err := json.Marshal(i)
+	if err != nil {
+		return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
+	}
+	return append(dst, marshaled...)
+}
+
+// AppendObjectData takes in an object that is already in a byte array
+// and adds it to dst.
+func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
+	// There are two conditions under which we put a ',' between the
+	// existing content and the new content:
+	// 1. the new content starts with '{', which should be dropped   OR
+	// 2. the existing content already has other fields
+	if o[0] == '{' {
+		o[0] = ','
+	} else if len(dst) > 1 {
+		dst = append(dst, ',')
+	}
+	return append(dst, o...)
+}
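+
+// A usage sketch (illustrative, not part of the upstream file). Note that
+// the leading '{' of o is rewritten to ',' in place, so o is mutated.
+//
+//     var enc Encoder
+//     dst := []byte(`{"a":1`)
+//     dst = enc.AppendObjectData(dst, []byte(`{"b":2}`))
+//     // string(dst) == `{"a":1,"b":2}`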
+
+// AppendIPAddr adds IPv4 or IPv6 address to dst.
+func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
+	return e.AppendString(dst, ip.String())
+}
+
+// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst.
+func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
+	return e.AppendString(dst, pfx.String())
+}
+
+// AppendMACAddr adds MAC address to dst.
+func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte {
+	return e.AppendString(dst, ha.String())
+}
diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go
new file mode 100644
index 0000000..8eb45a8
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/log.go
@@ -0,0 +1,400 @@
+// Package zerolog provides a lightweight logging library dedicated to JSON logging.
+//
+// A global Logger can be used for simple logging:
+//
+//     import "github.com/rs/zerolog/log"
+//
+//     log.Info().Msg("hello world")
+//     // Output: {"time":1494567715,"level":"info","message":"hello world"}
+//
+// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log".
+//
+// Fields can be added to log messages:
+//
+//     log.Info().Str("foo", "bar").Msg("hello world")
+//     // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
+//
+// Create a logger instance to manage different outputs:
+//
+//     logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+//     logger.Info().
+//            Str("foo", "bar").
+//            Msg("hello world")
+//     // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"}
+//
+// Sub-loggers let you chain loggers with additional context:
+//
+//     sublogger := log.With().Str("component", "foo").Logger()
+//     sublogger.Info().Msg("hello world")
+//     // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"}
+//
+// Level logging
+//
+//     zerolog.SetGlobalLevel(zerolog.InfoLevel)
+//
+//     log.Debug().Msg("filtered out message")
+//     log.Info().Msg("routed message")
+//
+//     if e := log.Debug(); e.Enabled() {
+//         // Compute log output only if enabled.
+//         value := compute()
+//         e.Str("foo": value).Msg("some debug message")
+//     }
+//     // Output: {"level":"info","time":1494567715,"routed message"}
+//
+// Customize automatic field names:
+//
+//     log.TimestampFieldName = "t"
+//     log.LevelFieldName = "p"
+//     log.MessageFieldName = "m"
+//
+//     log.Info().Msg("hello world")
+//     // Output: {"t":1494567715,"p":"info","m":"hello world"}
+//
+// Log with no level and message:
+//
+//     log.Log().Str("foo","bar").Msg("")
+//     // Output: {"time":1494567715,"foo":"bar"}
+//
+// Add contextual fields to global Logger:
+//
+//     log.Logger = log.With().Str("foo", "bar").Logger()
+//
+// Sample logs:
+//
+//     sampled := log.Sample(&zerolog.BasicSampler{N: 10})
+//     sampled.Info().Msg("will be logged every 10 messages")
+//
+// Log with contextual hooks:
+//
+//     // Create the hook:
+//     type SeverityHook struct{}
+//
+//     func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+//          if level != zerolog.NoLevel {
+//              e.Str("severity", level.String())
+//          }
+//     }
+//
+//     // And use it:
+//     var h SeverityHook
+//     log := zerolog.New(os.Stdout).Hook(h)
+//     log.Warn().Msg("")
+//     // Output: {"level":"warn","severity":"warn"}
+//
+//
+// Caveats
+//
+// There is no field deduplication out-of-the-box.
+// Using the same key multiple times creates a new key in the final JSON each time.
+//
+//     logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+//     logger.Info().
+//            Timestamp().
+//            Msg("dup")
+//     // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+//
+// However, this is not a big deal, as JSON accepts duplicate keys;
+// the last one prevails.
+package zerolog
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strconv"
+)
+
+// Level defines log levels.
+type Level uint8
+
+const (
+	// DebugLevel defines debug log level.
+	DebugLevel Level = iota
+	// InfoLevel defines info log level.
+	InfoLevel
+	// WarnLevel defines warn log level.
+	WarnLevel
+	// ErrorLevel defines error log level.
+	ErrorLevel
+	// FatalLevel defines fatal log level.
+	FatalLevel
+	// PanicLevel defines panic log level.
+	PanicLevel
+	// NoLevel defines an absent log level.
+	NoLevel
+	// Disabled disables the logger.
+	Disabled
+)
+
+func (l Level) String() string {
+	switch l {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warn"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	case NoLevel:
+		return ""
+	}
+	return ""
+}
+
+// ParseLevel converts a level string into a zerolog Level value.
+// It returns an error if the input string does not match known values.
+func ParseLevel(levelStr string) (Level, error) {
+	switch levelStr {
+	case DebugLevel.String():
+		return DebugLevel, nil
+	case InfoLevel.String():
+		return InfoLevel, nil
+	case WarnLevel.String():
+		return WarnLevel, nil
+	case ErrorLevel.String():
+		return ErrorLevel, nil
+	case FatalLevel.String():
+		return FatalLevel, nil
+	case PanicLevel.String():
+		return PanicLevel, nil
+	case NoLevel.String():
+		return NoLevel, nil
+	}
+	return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr)
+}
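+
+// A usage sketch (illustrative, not part of the upstream file):
+//
+//     lvl, err := zerolog.ParseLevel("info")
+//     if err != nil {
+//         lvl = zerolog.InfoLevel // fall back to a default
+//     }
+//     zerolog.SetGlobalLevel(lvl)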
+
+// A Logger represents an active logging object that generates lines
+// of JSON output to an io.Writer. Each logging operation makes a single
+// call to the Writer's Write method. There is no guarantee on access
+// serialization to the Writer. If your Writer is not thread safe,
+// you may consider using a sync wrapper.
+type Logger struct {
+	w       LevelWriter
+	level   Level
+	sampler Sampler
+	context []byte
+	hooks   []Hook
+}
+
+// New creates a root logger with given output writer. If the output writer implements
+// the LevelWriter interface, the WriteLevel method will be called instead of the Write
+// one.
+//
+// Each logging operation makes a single call to the Writer's Write method. There is no
+// guarantee on access serialization to the Writer. If your Writer is not thread safe,
+// you may consider using a sync wrapper.
+func New(w io.Writer) Logger {
+	if w == nil {
+		w = ioutil.Discard
+	}
+	lw, ok := w.(LevelWriter)
+	if !ok {
+		lw = levelWriterAdapter{w}
+	}
+	return Logger{w: lw}
+}
+
+// Nop returns a disabled logger for which all operations are no-ops.
+func Nop() Logger {
+	return New(nil).Level(Disabled)
+}
+
+// Output duplicates the current logger and sets w as its output.
+func (l Logger) Output(w io.Writer) Logger {
+	l2 := New(w)
+	l2.level = l.level
+	l2.sampler = l.sampler
+	if len(l.hooks) > 0 {
+		l2.hooks = append(l2.hooks, l.hooks...)
+	}
+	if l.context != nil {
+		l2.context = make([]byte, len(l.context), cap(l.context))
+		copy(l2.context, l.context)
+	}
+	return l2
+}
+
+// With creates a child logger with the field added to its context.
+func (l Logger) With() Context {
+	context := l.context
+	l.context = make([]byte, 0, 500)
+	if context != nil {
+		l.context = append(l.context, context...)
+	}
+	return Context{l}
+}
+
+// UpdateContext updates the internal logger's context.
+//
+// Use this method with caution. If unsure, prefer the With method.
+func (l *Logger) UpdateContext(update func(c Context) Context) {
+	if l == disabledLogger {
+		return
+	}
+	if cap(l.context) == 0 {
+		l.context = make([]byte, 0, 500)
+	}
+	c := update(Context{*l})
+	l.context = c.l.context
+}
+
+// Level creates a child logger with the minimum accepted level set to level.
+func (l Logger) Level(lvl Level) Logger {
+	l.level = lvl
+	return l
+}
+
+// Sample returns a logger with the s sampler.
+func (l Logger) Sample(s Sampler) Logger {
+	l.sampler = s
+	return l
+}
+
+// Hook returns a logger with the h Hook.
+func (l Logger) Hook(h Hook) Logger {
+	l.hooks = append(l.hooks, h)
+	return l
+}
+
+// Debug starts a new message with debug level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Debug() *Event {
+	return l.newEvent(DebugLevel, nil)
+}
+
+// Info starts a new message with info level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Info() *Event {
+	return l.newEvent(InfoLevel, nil)
+}
+
+// Warn starts a new message with warn level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Warn() *Event {
+	return l.newEvent(WarnLevel, nil)
+}
+
+// Error starts a new message with error level.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Error() *Event {
+	return l.newEvent(ErrorLevel, nil)
+}
+
+// Fatal starts a new message with fatal level. The os.Exit(1) function
+// is called by the Msg method, which terminates the program immediately.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Fatal() *Event {
+	return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) })
+}
+
+// Panic starts a new message with panic level. The panic() function
+// is called by the Msg method, which stops the ordinary flow of a goroutine.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Panic() *Event {
+	return l.newEvent(PanicLevel, func(msg string) { panic(msg) })
+}
+
+// WithLevel starts a new message with level. Unlike Fatal and Panic
+// methods, WithLevel does not terminate the program or stop the ordinary
+// flow of a goroutine when used with their respective levels.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) WithLevel(level Level) *Event {
+	switch level {
+	case DebugLevel:
+		return l.Debug()
+	case InfoLevel:
+		return l.Info()
+	case WarnLevel:
+		return l.Warn()
+	case ErrorLevel:
+		return l.Error()
+	case FatalLevel:
+		return l.newEvent(FatalLevel, nil)
+	case PanicLevel:
+		return l.newEvent(PanicLevel, nil)
+	case NoLevel:
+		return l.Log()
+	case Disabled:
+		return nil
+	default:
+		panic("zerolog: WithLevel(): invalid level: " + strconv.Itoa(int(level)))
+	}
+}
+
+// Log starts a new message with no level. Setting GlobalLevel to Disabled
+// will still disable events produced by this method.
+//
+// You must call Msg on the returned event in order to send the event.
+func (l *Logger) Log() *Event {
+	return l.newEvent(NoLevel, nil)
+}
+
+// Print sends a log event using debug level and no extra field.
+// Arguments are handled in the manner of fmt.Print.
+func (l *Logger) Print(v ...interface{}) {
+	if e := l.Debug(); e.Enabled() {
+		e.Msg(fmt.Sprint(v...))
+	}
+}
+
+// Printf sends a log event using debug level and no extra field.
+// Arguments are handled in the manner of fmt.Printf.
+func (l *Logger) Printf(format string, v ...interface{}) {
+	if e := l.Debug(); e.Enabled() {
+		e.Msg(fmt.Sprintf(format, v...))
+	}
+}
+
+// Write implements the io.Writer interface. This is useful to set as a writer
+// for the standard library log.
+func (l Logger) Write(p []byte) (n int, err error) {
+	n = len(p)
+	if n > 0 && p[n-1] == '\n' {
+		// Trim the trailing newline added by stdlog.
+		p = p[0 : n-1]
+	}
+	l.Log().Msg(string(p))
+	return
+}
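+
+// A usage sketch (illustrative, not part of the upstream file) routing the
+// standard library logger through zerolog; stdlog is an assumed alias for
+// the standard "log" package.
+//
+//     logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+//     stdlog.SetFlags(0)
+//     stdlog.SetOutput(logger)
+//     stdlog.Print("hello world")
+//     // Output: {"time":1494567715,"message":"hello world"}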
+
+func (l *Logger) newEvent(level Level, done func(string)) *Event {
+	enabled := l.should(level)
+	if !enabled {
+		return nil
+	}
+	e := newEvent(l.w, level)
+	e.done = done
+	e.ch = l.hooks
+	if level != NoLevel {
+		e.Str(LevelFieldName, level.String())
+	}
+	if l.context != nil && len(l.context) > 0 {
+		e.buf = enc.AppendObjectData(e.buf, l.context)
+	}
+	return e
+}
+
+// should returns true if the log event should be logged.
+func (l *Logger) should(lvl Level) bool {
+	if lvl < l.level || lvl < GlobalLevel() {
+		return false
+	}
+	if l.sampler != nil && !samplingDisabled() {
+		return l.sampler.Sample(lvl)
+	}
+	return true
+}
diff --git a/vendor/github.com/rs/zerolog/log/log.go b/vendor/github.com/rs/zerolog/log/log.go
new file mode 100644
index 0000000..dd92ab9
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/log/log.go
@@ -0,0 +1,115 @@
+// Package log provides a global logger for zerolog.
+package log
+
+import (
+	"context"
+	"io"
+	"os"
+
+	"github.com/rs/zerolog"
+)
+
+// Logger is the global logger.
+var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger()
+
+// Output duplicates the global logger and sets w as its output.
+func Output(w io.Writer) zerolog.Logger {
+	return Logger.Output(w)
+}
+
+// With creates a child logger with the field added to its context.
+func With() zerolog.Context {
+	return Logger.With()
+}
+
+// Level creates a child logger with the minimum accepted level set to level.
+func Level(level zerolog.Level) zerolog.Logger {
+	return Logger.Level(level)
+}
+
+// Sample returns a logger with the s sampler.
+func Sample(s zerolog.Sampler) zerolog.Logger {
+	return Logger.Sample(s)
+}
+
+// Hook returns a logger with the h Hook.
+func Hook(h zerolog.Hook) zerolog.Logger {
+	return Logger.Hook(h)
+}
+
+// Debug starts a new message with debug level.
+//
+// You must call Msg on the returned event in order to send the event.
+func Debug() *zerolog.Event {
+	return Logger.Debug()
+}
+
+// Info starts a new message with info level.
+//
+// You must call Msg on the returned event in order to send the event.
+func Info() *zerolog.Event {
+	return Logger.Info()
+}
+
+// Warn starts a new message with warn level.
+//
+// You must call Msg on the returned event in order to send the event.
+func Warn() *zerolog.Event {
+	return Logger.Warn()
+}
+
+// Error starts a new message with error level.
+//
+// You must call Msg on the returned event in order to send the event.
+func Error() *zerolog.Event {
+	return Logger.Error()
+}
+
+// Fatal starts a new message with fatal level. The os.Exit(1) function
+// is called by the Msg method.
+//
+// You must call Msg on the returned event in order to send the event.
+func Fatal() *zerolog.Event {
+	return Logger.Fatal()
+}
+
+// Panic starts a new message with panic level. The message is also sent
+// to the panic function.
+//
+// You must call Msg on the returned event in order to send the event.
+func Panic() *zerolog.Event {
+	return Logger.Panic()
+}
+
+// WithLevel starts a new message with level.
+//
+// You must call Msg on the returned event in order to send the event.
+func WithLevel(level zerolog.Level) *zerolog.Event {
+	return Logger.WithLevel(level)
+}
+
+// Log starts a new message with no level. Setting zerolog.GlobalLevel to
+// zerolog.Disabled will still disable events produced by this method.
+//
+// You must call Msg on the returned event in order to send the event.
+func Log() *zerolog.Event {
+	return Logger.Log()
+}
+
+// Print sends a log event using debug level and no extra field.
+// Arguments are handled in the manner of fmt.Print.
+func Print(v ...interface{}) {
+	Logger.Print(v...)
+}
+
+// Printf sends a log event using debug level and no extra field.
+// Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, v ...interface{}) {
+	Logger.Printf(format, v...)
+}
+
+// Ctx returns the Logger associated with the ctx. If no logger
+// is associated, a disabled logger is returned.
+func Ctx(ctx context.Context) *zerolog.Logger {
+	return zerolog.Ctx(ctx)
+}
diff --git a/vendor/github.com/rs/zerolog/pretty.png b/vendor/github.com/rs/zerolog/pretty.png
new file mode 100644
index 0000000..34e4308
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/pretty.png
Binary files differ
diff --git a/vendor/github.com/rs/zerolog/sampler.go b/vendor/github.com/rs/zerolog/sampler.go
new file mode 100644
index 0000000..2360f0d
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/sampler.go
@@ -0,0 +1,126 @@
+package zerolog
+
+import (
+	"math/rand"
+	"sync/atomic"
+	"time"
+)
+
+var (
+	// Often samples roughly 1 event out of every 10.
+	Often = RandomSampler(10)
+	// Sometimes samples roughly 1 event out of every 100.
+	Sometimes = RandomSampler(100)
+	// Rarely samples roughly 1 event out of every 1000.
+	Rarely = RandomSampler(1000)
+)
+
+// Sampler defines an interface to a log sampler.
+type Sampler interface {
+	// Sample returns true if the event should be part of the sample, false if
+	// the event should be dropped.
+	Sample(lvl Level) bool
+}
+
+// RandomSampler uses a PRNG to randomly sample an event out of N events,
+// regardless of their level.
+type RandomSampler uint32
+
+// Sample implements the Sampler interface.
+func (s RandomSampler) Sample(lvl Level) bool {
+	if s <= 0 {
+		return false
+	}
+	if rand.Intn(int(s)) != 0 {
+		return false
+	}
+	return true
+}
+
+// BasicSampler is a sampler that will send every Nth event, regardless of
+// its level.
+type BasicSampler struct {
+	N       uint32
+	counter uint32
+}
+
+// Sample implements the Sampler interface.
+func (s *BasicSampler) Sample(lvl Level) bool {
+	c := atomic.AddUint32(&s.counter, 1)
+	return c%s.N == s.N-1
+}
+
+// BurstSampler lets Burst events pass per Period, then passes the decision to
+// NextSampler. If NextSampler is not set, all subsequent events are rejected.
+type BurstSampler struct {
+	// Burst is the maximum number of events per period allowed before calling
+	// NextSampler.
+	Burst uint32
+	// Period defines the burst period. If 0, NextSampler is always called.
+	Period time.Duration
+	// NextSampler is the sampler used after the burst is reached. If nil,
+	// events are always rejected after the burst.
+	NextSampler Sampler
+
+	counter uint32
+	resetAt int64
+}
+
+// Sample implements the Sampler interface.
+func (s *BurstSampler) Sample(lvl Level) bool {
+	if s.Burst > 0 && s.Period > 0 {
+		if s.inc() <= s.Burst {
+			return true
+		}
+	}
+	if s.NextSampler == nil {
+		return false
+	}
+	return s.NextSampler.Sample(lvl)
+}
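+
+// A usage sketch (illustrative, not part of the upstream file): allow 5
+// events per second, then sample roughly 1 in 100 of the overflow.
+//
+//     sampled := log.Sample(&zerolog.BurstSampler{
+//         Burst:       5,
+//         Period:      time.Second,
+//         NextSampler: &zerolog.BasicSampler{N: 100},
+//     })
+//     sampled.Info().Msg("possibly sampled message")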
+
+func (s *BurstSampler) inc() uint32 {
+	now := time.Now().UnixNano()
+	resetAt := atomic.LoadInt64(&s.resetAt)
+	var c uint32
+	if now > resetAt {
+		c = 1
+		atomic.StoreUint32(&s.counter, c)
+		newResetAt := now + s.Period.Nanoseconds()
+		reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt)
+		if !reset {
+			// Lost the race with another goroutine trying to reset.
+			c = atomic.AddUint32(&s.counter, 1)
+		}
+	} else {
+		c = atomic.AddUint32(&s.counter, 1)
+	}
+	return c
+}
+
+// LevelSampler applies a different sampler for each level.
+type LevelSampler struct {
+	DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler
+}
+
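+// Sample implements the Sampler interface.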
+func (s LevelSampler) Sample(lvl Level) bool {
+	switch lvl {
+	case DebugLevel:
+		if s.DebugSampler != nil {
+			return s.DebugSampler.Sample(lvl)
+		}
+	case InfoLevel:
+		if s.InfoSampler != nil {
+			return s.InfoSampler.Sample(lvl)
+		}
+	case WarnLevel:
+		if s.WarnSampler != nil {
+			return s.WarnSampler.Sample(lvl)
+		}
+	case ErrorLevel:
+		if s.ErrorSampler != nil {
+			return s.ErrorSampler.Sample(lvl)
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/rs/zerolog/syslog.go b/vendor/github.com/rs/zerolog/syslog.go
new file mode 100644
index 0000000..82b470e
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/syslog.go
@@ -0,0 +1,57 @@
+// +build !windows
+// +build !binary_log
+
+package zerolog
+
+import (
+	"io"
+)
+
+// SyslogWriter is an interface matching a syslog.Writer struct.
+type SyslogWriter interface {
+	io.Writer
+	Debug(m string) error
+	Info(m string) error
+	Warning(m string) error
+	Err(m string) error
+	Emerg(m string) error
+	Crit(m string) error
+}
+
+type syslogWriter struct {
+	w SyslogWriter
+}
+
+// SyslogLevelWriter wraps a SyslogWriter and calls the syslog level
+// method matching the zerolog level.
+func SyslogLevelWriter(w SyslogWriter) LevelWriter {
+	return syslogWriter{w}
+}
+
+func (sw syslogWriter) Write(p []byte) (n int, err error) {
+	return sw.w.Write(p)
+}
+
+// WriteLevel implements LevelWriter interface.
+func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
+	switch level {
+	case DebugLevel:
+		err = sw.w.Debug(string(p))
+	case InfoLevel:
+		err = sw.w.Info(string(p))
+	case WarnLevel:
+		err = sw.w.Warning(string(p))
+	case ErrorLevel:
+		err = sw.w.Err(string(p))
+	case FatalLevel:
+		err = sw.w.Emerg(string(p))
+	case PanicLevel:
+		err = sw.w.Crit(string(p))
+	case NoLevel:
+		err = sw.w.Info(string(p))
+	default:
+		panic("invalid level")
+	}
+	n = len(p)
+	return
+}
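+
+// Example (editor's sketch, assuming the standard library log/syslog package,
+// whose *syslog.Writer satisfies the SyslogWriter interface above):
+//
+//	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "myapp")
+//	if err == nil {
+//		logger := zerolog.New(zerolog.SyslogLevelWriter(w))
+//		logger.Warn().Msg("routed to syslog.Warning")
+//	}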
diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go
new file mode 100644
index 0000000..a58d717
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/writer.go
@@ -0,0 +1,100 @@
+package zerolog
+
+import (
+	"io"
+	"sync"
+)
+
+// LevelWriter defines an interface that a writer may implement in order
+// to receive level information along with the payload.
+type LevelWriter interface {
+	io.Writer
+	WriteLevel(level Level, p []byte) (n int, err error)
+}
+
+type levelWriterAdapter struct {
+	io.Writer
+}
+
+func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
+	return lw.Write(p)
+}
+
+type syncWriter struct {
+	mu sync.Mutex
+	lw LevelWriter
+}
+
+// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
+// This syncer can be used when the call to the writer's Write method is not
+// thread safe. Note that the os.File Write operation uses the write() syscall,
+// which is supposed to be thread-safe on POSIX systems, so there is no need to
+// use this with os.File on such systems, as zerolog guarantees to issue a
+// single Write call per log event.
+func SyncWriter(w io.Writer) io.Writer {
+	if lw, ok := w.(LevelWriter); ok {
+		return &syncWriter{lw: lw}
+	}
+	return &syncWriter{lw: levelWriterAdapter{w}}
+}
+
+// Write implements the io.Writer interface.
+func (s *syncWriter) Write(p []byte) (n int, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.lw.Write(p)
+}
+
+// WriteLevel implements the LevelWriter interface.
+func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.lw.WriteLevel(l, p)
+}
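+
+// Example (editor's sketch): wrap a writer whose Write method is not
+// thread-safe before handing it to zerolog (unsafeWriter is hypothetical):
+//
+//	logger := zerolog.New(zerolog.SyncWriter(unsafeWriter))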
+
+type multiLevelWriter struct {
+	writers []LevelWriter
+}
+
+func (t multiLevelWriter) Write(p []byte) (n int, err error) {
+	for _, w := range t.writers {
+		n, err = w.Write(p)
+		if err != nil {
+			return
+		}
+		if n != len(p) {
+			err = io.ErrShortWrite
+			return
+		}
+	}
+	return len(p), nil
+}
+
+func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
+	for _, w := range t.writers {
+		n, err = w.WriteLevel(l, p)
+		if err != nil {
+			return
+		}
+		if n != len(p) {
+			err = io.ErrShortWrite
+			return
+		}
+	}
+	return len(p), nil
+}
+
+// MultiLevelWriter creates a writer that duplicates its writes to all the
+// provided writers, similar to the Unix tee(1) command. If some writers
+// implement LevelWriter, their WriteLevel method will be used instead of Write.
+func MultiLevelWriter(writers ...io.Writer) LevelWriter {
+	lwriters := make([]LevelWriter, 0, len(writers))
+	for _, w := range writers {
+		if lw, ok := w.(LevelWriter); ok {
+			lwriters = append(lwriters, lw)
+		} else {
+			lwriters = append(lwriters, levelWriterAdapter{w})
+		}
+	}
+	return multiLevelWriter{lwriters}
+}
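+
+// Example (editor's sketch): tee log events to stderr and a file; f is
+// assumed to be an already-opened *os.File:
+//
+//	logger := zerolog.New(zerolog.MultiLevelWriter(os.Stderr, f))
+//	logger.Info().Msg("written to both destinations")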
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..6b7d7d1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,2 @@
+logrus
+vendor
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..a8f1545
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,52 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+env:
+  - GOMAXPROCS=4 GORACE=halt_on_error=1
+matrix:
+  include:
+    - go: 1.10.x
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v ./...
+    - go: 1.11.x
+      env: GO111MODULE=on
+      install:
+        - go mod download
+      script:
+        - go test -race -v ./...
+    - go: 1.11.x
+      env: GO111MODULE=off
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v ./...
+    - go: 1.10.x
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v -tags appengine ./...
+    - go: 1.11.x
+      env: GO111MODULE=on
+      install:
+        - go mod download
+      script:
+        - go test -race -v -tags appengine ./...
+    - go: 1.11.x
+      env: GO111MODULE=off
+      install:
+        - go get github.com/stretchr/testify/assert
+        - go get golang.org/x/crypto/ssh/terminal
+        - go get golang.org/x/sys/unix
+        - go get golang.org/x/sys/windows
+      script:
+        - go test -race -v -tags appengine ./...
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..cb85d9f
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,165 @@
+# 1.2.0
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to include the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements the `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra new line at the end of message in text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of Go module support
+  * an indent configuration for the JSON formatter
+  * output colour support for Windows
+  * the field sort function is now configurable for the text formatter
+  * CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
+
+# 1.0.6
+
+This new release introduces:
+  * a new API, WithTime, which allows easily forcing the time of the log entry;
+    this is mostly useful for logger wrappers
+  * a fix reverting the immutability of the entry given as a parameter to the hooks
+  * a new configuration field of the JSON formatter in order to put all the fields
+    in a nested dictionary
+  * a new SetOutput method in the Logger
+  * a new configuration of the text formatter to configure the names of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..3987310
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,494 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and a standard was needed. Some environments
+experienced problems with the upper-case variant, so the lower-case variant was
+chosen. Everything using `logrus` will need to use the lower-case import:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+```
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+	log.SetFormatter(&log.TextFormatter{
+		DisableColors: true,
+		FullTimestamp: true,
+	})
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7.  You can validate this in your
+environment via benchmarks: 
+```
+go test -bench=.*CallerTracing
+```
+
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case, and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr
+  // Can be any io.Writer, see below for File example
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+  "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stdout
+
+  // You could set this to any `io.Writer` such as a file
+  // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+  // if err == nil {
+  //  log.Out = file
+  // } else {
+  //  log.Info("Failed to log to file, using default stderr")
+  // }
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
+
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
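+
+For instance (a minimal sketch, assuming verbosity is gated on a hypothetical
+`VERBOSE` environment variable):
+
+```go
+if os.Getenv("VERBOSE") != "" {
+  log.SetLevel(log.DebugLevel)
+}
+```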
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`.  To force no colored output even if there is a TTY  set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("failed to marshal fields to JSON: %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// Register it with:
+//   log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can set each logger's level, hook and formatter via a config file, and loggers will be generated with different configs for different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+  "github.com/sirupsen/logrus"
+  "github.com/sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+  "testing"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Helloerror")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or calling hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
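+
+A minimal sketch of opting out of locking, assuming the conditions above hold
+(no hooks registered, `O_APPEND` file output, small writes):
+
+```go
+logger := logrus.New()
+f, err := os.OpenFile("app.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
+if err == nil {
+  logger.Out = f
+  logger.SetNoLock()
+}
+```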
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8af9063
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..96c2ce1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+  GOPATH: c:\gopath
+branches:
+  only:
+    - master
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+build_script:
+  - go get -t
+  - go test
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..df6d188
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,393 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	bufferPool *sync.Pool
+
+	// qualified package name, cached at first use
+	logrusPackage string
+
+	// Positions in the call stack when tracing to report the calling method
+	minimumCallerDepth int
+
+	// Used for caller information initialisation
+	callerInitOnce sync.Once
+)
+
+const (
+	maximumCallerDepth int = 25
+	knownLogrusFrames  int = 4
+)
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+
+	// start at the bottom of the stack before the package-name cache is primed
+	minimumCallerDepth = 1
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+	Level Level
+
+	// Calling method, with package name
+	Caller *runtime.Frame
+
+	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to entry
+	Buffer *bytes.Buffer
+
+	// err may contain a field formatting error
+	err string
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, plus one optional.  Give a little extra room.
+		Data: make(Fields, 6),
+	}
+}
+
+// String returns the string representation of the entry, as produced by the
+// logger's formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	fieldErr := entry.err
+	for k, v := range fields {
+		isErrField := false
+		if t := reflect.TypeOf(v); t != nil {
+			switch t.Kind() {
+			case reflect.Func:
+				isErrField = true
+			case reflect.Ptr:
+				isErrField = t.Elem().Kind() == reflect.Func
+			}
+		}
+		if isErrField {
+			tmp := fmt.Sprintf("can not add field %q", k)
+			if fieldErr != "" {
+				fieldErr = entry.err + ", " + tmp
+			} else {
+				fieldErr = tmp
+			}
+		} else {
+			data[k] = v
+		}
+	}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err}
+}
+
+// getPackageName reduces a fully qualified function name to the package name.
+// There really ought to be a better way...
+func getPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+
+	return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+	// Restrict the lookback frames to avoid runaway lookups
+	pcs := make([]uintptr, maximumCallerDepth)
+	depth := runtime.Callers(minimumCallerDepth, pcs)
+	frames := runtime.CallersFrames(pcs[:depth])
+
+	// cache this package's fully-qualified name
+	callerInitOnce.Do(func() {
+		logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
+
+		// now that we have the cache, we can skip a minimum count of known-logrus functions
+		// XXX this is dubious: the number of frames may vary if an entry is stored in a logger interface
+		minimumCallerDepth = knownLogrusFrames
+	})
+
+	for f, again := frames.Next(); again; f, again = frames.Next() {
+		pkg := getPackageName(f.Function)
+
+		// If the caller isn't part of this package, we're done
+		if pkg != logrusPackage {
+			return &f
+		}
+	}
+
+	// if we got here, we failed to find the caller's context
+	return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+	return entry.Logger != nil &&
+		entry.Logger.ReportCaller &&
+		entry.Caller != nil
+}
+
+// This function is not declared with a pointer receiver because otherwise
+// race conditions will occur when using multiple goroutines.
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+
+	// Default to now, but allow users to override if they want.
+	//
+	// We don't have to worry about polluting future calls to Entry#log()
+	// with this assignment because this function is declared with a
+	// non-pointer receiver.
+	if entry.Time.IsZero() {
+		entry.Time = time.Now()
+	}
+
+	entry.Level = level
+	entry.Message = msg
+	if entry.Logger.ReportCaller {
+		entry.Caller = getCaller()
+	}
+
+	entry.fireHooks()
+
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+
+	entry.write()
+
+	entry.Buffer = nil
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+	}
+}
+
+func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Log(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.log(level, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+	entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	entry.Log(DebugLevel, args...)
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	entry.Log(InfoLevel, args...)
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	entry.Log(WarnLevel, args...)
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	entry.Log(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	entry.Log(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	entry.Log(PanicLevel, args...)
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+	entry.Log(level, fmt.Sprintf(format, args...))
+}
+
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+	entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	entry.Logf(InfoLevel, format, args...)
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	entry.Logf(WarnLevel, format, args...)
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	entry.Logf(ErrorLevel, format, args...)
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	entry.Logf(FatalLevel, format, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	entry.Logf(PanicLevel, format, args...)
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Logln(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.Log(level, entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Traceln(args ...interface{}) {
+	entry.Logln(TraceLevel, args...)
+}
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	entry.Logln(DebugLevel, args...)
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	entry.Logln(InfoLevel, args...)
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	entry.Logln(WarnLevel, args...)
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	entry.Logln(ErrorLevel, args...)
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	entry.Logln(FatalLevel, args...)
+	entry.Logger.Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	entry.Logln(PanicLevel, args...)
+}
+
+// sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..7342613
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,219 @@
+package logrus
+
+import (
+	"io"
+	"time"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.SetOutput(out)
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.AddHook(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// WithTime creates an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+	return std.WithTime(t)
+}
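+
+// Example (editor's sketch): pin the entry time, e.g. when replaying events:
+//
+//	t := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
+//	logrus.WithTime(t).Info("backfilled event")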
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+	std.Trace(args...)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+	std.Tracef(format, args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
+	}
+
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
+	}
+
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 0000000..94574cc
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,11 @@
+module github.com/sirupsen/logrus
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.1
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.1.1 // indirect
+	github.com/stretchr/testify v1.2.2
+	golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
+	golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
+)
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 0000000..133d34a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,15 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
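+
+// A minimal hook sketch (editor's illustration, not part of upstream logrus):
+//
+//	type AlertHook struct{}
+//
+//	func (AlertHook) Levels() []Level {
+//		return []Level{ErrorLevel, FatalLevel, PanicLevel}
+//	}
+//
+//	func (AlertHook) Fire(e *Entry) error {
+//		fmt.Fprintln(os.Stderr, "ALERT:", e.Message)
+//		return nil
+//	}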
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
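
Hooks fire synchronously from `entry.log`, as the comment above warns. A minimal sketch of an implementation; `ErrorHook` is a hypothetical type, not part of this package:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

// ErrorHook is a hypothetical hook that watches only error-and-above
// entries, e.g. to forward them to an external tracking service.
type ErrorHook struct{}

// Levels restricts the hook to error, fatal and panic entries.
func (h *ErrorHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire runs synchronously; long-running work should be handed off to a
// goroutine by the hook itself if blocking is a concern.
func (h *ErrorHook) Fire(entry *log.Entry) error {
	// forward entry.Message and entry.Data to the tracking service here
	return nil
}

func main() {
	log.AddHook(&ErrorHook{})
	log.WithField("component", "example").Error("something went wrong")
}
```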
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..2605753
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,105 @@
+package logrus
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+// JSONFormatter formats logs into parsable JSON
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message",
+	//         FieldKeyFunc:  "@caller",
+	//     },
+	// }
+	FieldMap FieldMap
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+4)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
+		data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	encoder := json.NewEncoder(b)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+
+	return b.Bytes(), nil
+}
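
Putting the formatter options together, a short configuration sketch; the renamed keys mirror the FieldMap doc comment and are illustrative only:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetOutput(os.Stdout)
	logger.SetFormatter(&log.JSONFormatter{
		// Nest all user-supplied fields under a single "data" key.
		DataKey: "data",
		// Rename the default keys, as described in the FieldMap comment.
		FieldMap: log.FieldMap{
			log.FieldKeyTime:  "@timestamp",
			log.FieldKeyLevel: "@level",
			log.FieldKeyMsg:   "@message",
		},
		PrettyPrint: true, // indent the JSON output
	})
	logger.WithField("user", "alice").Info("login succeeded")
}
```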
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..9bf64e2
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,343 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it at the default, which is `os.Stderr`. You can also set
+	// this to something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out.
+	// The included formatters are `TextFormatter` and `JSONFormatter`, of which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but not when writing to a file. You can easily provide
+	// your own implementation of the `Formatter` interface; see the `README` or
+	// the included formatters for examples.
+	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.InfoLevel`, which allows Info(), Warn(), Error() and Fatal() to
+	// be logged.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:          os.Stderr,
+		Formatter:    new(TextFormatter),
+		Hooks:        make(LevelHooks),
+		Level:        InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Error, Fatal or Panic; it only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a map of fields to the log entry. All it does is call `WithField` for
+// each entry in the map.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logf(level, format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	logger.Logf(FatalLevel, format, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Log(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+	logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logln(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
+// When a file is opened in append mode, it is safe to write to it
+// concurrently (for messages up to 4k on Linux). In these cases the user
+// can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
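
Because `ExitFunc` is exported and `Exit` only falls back to `os.Exit` when it is nil, fatal paths can be exercised without terminating the process. A sketch under that assumption:

```go
package main

import (
	"bytes"
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	var buf bytes.Buffer
	logger := log.New()
	logger.SetOutput(&buf)

	// Swap the exit function so Fatal can be observed instead of
	// killing the process.
	exitCode := -1
	logger.ExitFunc = func(code int) { exitCode = code }

	logger.Fatal("simulated fatal condition")
	fmt.Printf("captured exit code %d, log: %s", exitCode, buf.String())
}
```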
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..c1ca889
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,186 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	if b, err := level.MarshalText(); err == nil {
+		return string(b)
+	} else {
+		return "unknown"
+	}
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = Level(l)
+
+	return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+	switch level {
+	case TraceLevel:
+		return []byte("trace"), nil
+	case DebugLevel:
+		return []byte("debug"), nil
+	case InfoLevel:
+		return []byte("info"), nil
+	case WarnLevel:
+		return []byte("warning"), nil
+	case ErrorLevel:
+		return []byte("error"), nil
+	case FatalLevel:
+		return []byte("fatal"), nil
+	case PanicLevel:
+		return []byte("panic"), nil
+	}
+
+	return nil, fmt.Errorf("not a valid lorus level %q", level)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+	TraceLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way it'll
+// accept both a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// kept for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
+}
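
A typical use of `ParseLevel` is wiring the level to configuration; `LOG_LEVEL` below is a hypothetical environment variable, not something this package defines:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Any of the names accepted by ParseLevel ("panic" through "trace")
	// is valid; unknown values fall back to the usual Info default.
	lvl, err := log.ParseLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		lvl = log.InfoLevel
	}
	log.SetLevel(lvl)
	log.Infof("effective log level: %s", log.GetLevel())
}
```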
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_aix.go b/vendor/github.com/sirupsen/logrus/terminal_check_aix.go
new file mode 100644
index 0000000..04fdb7b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_aix.go
@@ -0,0 +1,9 @@
+// +build !appengine,!js,!windows,aix
+
+package logrus
+
+import "io"
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 0000000..0c20975
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,11 @@
+// +build js
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..d465565
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,19 @@
+// +build !appengine,!js,!windows,!aix
+
+package logrus
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/crypto/ssh/terminal"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return terminal.IsTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 0000000..3b9d286
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,20 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		var mode uint32
+		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+		return err == nil
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..3dbd237
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package logrus
+
+import "io"
+
+func initTerminal(w io.Writer) {
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..b4ef528
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,18 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+	switch v := w.(type) {
+	case *os.File:
+		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+	}
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..fb21649
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,273 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 36
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	emptyFieldMap FieldMap
+)
+
+func init() {
+	baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time elapsed since the beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+
+	// The keys sorting function; when nil, sort.Strings is used.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's Out is a terminal
+	isTerminal bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	terminalInitOnce sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+
+		if f.isTerminal {
+			initTerminal(entry.Logger.Out)
+		}
+	}
+}
+
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+	if f.EnvironmentOverrideColors {
+		if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+			isColored = true
+		} else if ok && force == "0" {
+			isColored = false
+		} else if os.Getenv("CLICOLOR") == "0" {
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields)
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+	keys := make([]string, 0, len(data))
+	for k := range data {
+		keys = append(keys, k)
+	}
+
+	fixedKeys := make([]string, 0, 4+len(data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		fixedKeys = append(fixedKeys,
+			f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+	}
+
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	f.terminalInitOnce.Do(func() { f.init(entry) })
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = defaultTimestampFormat
+	}
+	if f.isColored() {
+		f.printColored(b, entry, keys, data, timestampFormat)
+	} else {
+		for _, key := range fixedKeys {
+			var value interface{}
+			switch {
+			case key == f.FieldMap.resolve(FieldKeyTime):
+				value = entry.Time.Format(timestampFormat)
+			case key == f.FieldMap.resolve(FieldKeyLevel):
+				value = entry.Level.String()
+			case key == f.FieldMap.resolve(FieldKeyMsg):
+				value = entry.Message
+			case key == f.FieldMap.resolve(FieldKeyLogrusError):
+				value = entry.err
+			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+				value = entry.Caller.Function
+			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+				value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+			default:
+				value = data[key]
+			}
+			f.appendKeyValue(b, key, value)
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel, TraceLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())
+	if !f.DisableLevelTruncation {
+		levelText = levelText[0:4]
+	}
+
+	// Remove a single newline if it already exists in the message to keep
+	// the behavior of logrus text_formatter the same as the stdlib log package
+	entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+	caller := ""
+
+	if entry.HasCaller() {
+		caller = fmt.Sprintf("%s:%d %s()",
+			entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
+	}
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
+	}
+	for _, k := range keys {
+		v := data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+	if f.QuoteEmptyFields && len(text) == 0 {
+		return true
+	}
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
+}
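
A short sketch combining the TextFormatter options documented above; the reverse sort is purely illustrative of `SortingFunc`:

```go
package main

import (
	"sort"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		FullTimestamp:    true,       // full time instead of seconds since start
		TimestampFormat:  "15:04:05", // any time.Format layout
		QuoteEmptyFields: true,
		// Custom ordering of field keys; sort.Strings is the fallback
		// when this is nil.
		SortingFunc: func(keys []string) {
			sort.Sort(sort.Reverse(sort.StringSlice(keys)))
		},
	})
	log.WithField("request_id", "abc123").Info("handled request")
}
```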
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..9e1f751
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,64 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+
+	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
+	case DebugLevel:
+		printFunc = entry.Debug
+	case InfoLevel:
+		printFunc = entry.Info
+	case WarnLevel:
+		printFunc = entry.Warn
+	case ErrorLevel:
+		printFunc = entry.Error
+	case FatalLevel:
+		printFunc = entry.Fatal
+	case PanicLevel:
+		printFunc = entry.Panic
+	default:
+		printFunc = entry.Print
+	}
+
+	go entry.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
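
`WriterLevel` is the bridge for code that expects an `io.Writer` or a stdlib `*log.Logger`. A sketch that routes `http.Server`'s internal error log through logrus; the address and timeouts are illustrative:

```go
package main

import (
	stdlog "log"
	"net/http"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	// Lines written to w are re-logged at WarnLevel by a reader
	// goroutine; closing w stops that goroutine.
	w := logger.WriterLevel(log.WarnLevel)
	defer w.Close()

	srv := &http.Server{
		Addr:         ":8080",
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
		// Route the standard library's server errors through logrus.
		ErrorLog: stdlog.New(w, "", 0),
	}
	logger.Fatal(srv.ListenAndServe())
}
```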
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 0000000..f38ec59
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 0000000..aa1c2b9
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,484 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+//    assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+//    assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists at the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0,
+// or a slice or channel with len == 0.
+//
+//  assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+//    assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   assert.EqualErrorf(t, err,  expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same
+// types and equal.
+//
+//    assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Errorf(t, err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedErrorf, err)
+//   }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+//    assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure through the provided TestingT.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test immediately.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+//    assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+//  assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+//  assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+//  assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+//  assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that an object is implemented by the specified interface.
+//
+//    assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// 	 assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+//  assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+//    assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+//    assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+//    assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+//    assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "",
+// false, 0, or a slice or channel with len == 0.
+//
+//  if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+//    assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+//    assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+//  assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//  assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain
+// all elements given in the specified subset(array, slice...).
+//
+//    assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+//   assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+//  assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//  assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+//    assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+//   assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
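
All of these `...f` variants forward to their plain counterparts with the format string prepended to the variadic args, so they read like printf. A minimal test sketch:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestFormattedAsserts(t *testing.T) {
	got, want := 2+2, 4
	// The message is printf-style and only rendered on failure.
	assert.Equalf(t, want, got, "sum was wrong for input %d", 2)
	assert.Truef(t, got == want, "expected %d, got %d", want, got)
}
```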
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 0000000..d2bb0b8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+	if h, ok := t.(tHelper); ok { h.Helper() }
+	return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
new file mode 100644
index 0000000..de39f79
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -0,0 +1,956 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+	http "net/http"
+	url "net/url"
+	time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    a.Contains("Hello World", "World")
+//    a.Contains(["Hello", "World"], "World")
+//    a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    a.Containsf("Hello World", "World", "error message %s", "formatted")
+//    a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+//    a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists at the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists at the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0,
+// or a slice or channel with len == 0.
+//
+//  a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0,
+// or a slice or channel with len == 0.
+//
+//  a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+//    a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   a.EqualError(err,  expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   a.EqualErrorf(err,  expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+//    a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.Error(err) {
+// 	   assert.Equal(t, expectedError, err)
+//   }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Error(a.t, err, msgAndArgs...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.Errorf(err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedErrorf, err)
+//   }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Errorf(a.t, err, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+//    a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+//    a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through the attached TestingT object.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the attached TestingT object.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+//    a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+//    a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
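+//
+// For example (illustrative; "config.yaml" is a placeholder path):
+//
+//    a.FileExists("config.yaml")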
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return FileExistsf(a.t, path, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+//  a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+//  a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+//  a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+//  a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+//  a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+//    a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+//    a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// 	 a.InDelta(math.Pi, (22 / 7.0), 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
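+//
+// For example (a minimal sketch with literal maps):
+//
+//    a.InDeltaMapValues(map[string]float64{"pi": 3.14}, map[string]float64{"pi": 3.1415}, 0.01)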
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
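+//
+// For example (a minimal sketch with literal slices):
+//
+//    a.InDeltaSlice([]float64{1.0, 2.0}, []float64{1.01, 1.99}, 0.1)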
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// 	 a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
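+//
+// For example, 100 and 101 have a relative error of 0.01:
+//
+//    a.InEpsilon(100.0, 101.0, 0.02)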
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
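+//
+// For example:
+//
+//    a.IsType(int64(0), int64(42))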
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+//  a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+//    a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+//    a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Lenf(a.t, object, length, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+//    a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+//    a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Nilf(a.t, object, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.NoError(err) {
+// 	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if a.NoErrorf(err, "error message %s", "formatted") {
+// 	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NoErrorf(a.t, err, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    a.NotContains("Hello World", "Earth")
+//    a.NotContains(["Hello", "World"], "Earth")
+//    a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+//    a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+//    a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0,
+// or a slice or a channel with len == 0.
+//
+//  if a.NotEmpty(obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "", false, 0,
+// or a slice or a channel with len == 0.
+//
+//  if a.NotEmptyf(obj, "error message %s", "formatted") {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+//    a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+//    a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+//    a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+//    a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//  a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+//  a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//  a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+//    a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+//    a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
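+//
+// For example:
+//
+//    a.NotZero(42)   // passes
+//    a.NotZero("")   // fails, "" is the zero value for string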
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+//   a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+//   a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Panicsf(a.t, f, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//  a.Regexp(regexp.MustCompile("start"), "it's starting")
+//  a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+//  a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//  a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+//    a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+//    a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+//   a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
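+//
+// For example:
+//
+//    a.Zero(0)       // passes
+//    a.Zero("text")  // fails, "text" is not the zero value for string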
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return Zerof(a.t, i, msg, args...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
new file mode 100644
index 0000000..188bb9e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+	if h, ok := a.t.(tHelper); ok { h.Helper() }
+	return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 0000000..9bd4a80
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,1416 @@
+package assert
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/pmezard/go-difflib/difflib"
+)
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values.  Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value.  Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value.  Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value.  Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
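+//
+// For example, a table-driven test can be parameterized by the assertion itself.
+// A minimal sketch (hypothetical test code, not part of this package):
+//
+//	tests := []struct {
+//		name      string
+//		value     interface{}
+//		assertion ValueAssertionFunc
+//	}{
+//		{"empty slice", []int{}, Empty},
+//		{"non-empty slice", []int{1}, NotEmpty},
+//	}
+//	for _, tt := range tests {
+//		t.Run(tt.name, func(t *testing.T) { tt.assertion(t, tt.value) })
+//	}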
+
+// Comparison is a custom function that returns true on success and false on failure.
+type Comparison func() (success bool)
+
+/*
+	Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
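+//
+// For example:
+//
+//	ObjectsAreEqual([]byte("abc"), []byte("abc")) // true, byte slices are compared with bytes.Equal
+//	ObjectsAreEqual(int32(1), int64(1))           // false, reflect.DeepEqual is type-sensitive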
+func ObjectsAreEqual(expected, actual interface{}) bool {
+	if expected == nil || actual == nil {
+		return expected == actual
+	}
+
+	exp, ok := expected.([]byte)
+	if !ok {
+		return reflect.DeepEqual(expected, actual)
+	}
+
+	act, ok := actual.([]byte)
+	if !ok {
+		return false
+	}
+	if exp == nil || act == nil {
+		return exp == nil && act == nil
+	}
+	return bytes.Equal(exp, act)
+}
+
+// ObjectsAreEqualValues reports whether two objects are equal, or whether their
+// values are equal after conversion to the same type.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+	if ObjectsAreEqual(expected, actual) {
+		return true
+	}
+
+	actualType := reflect.TypeOf(actual)
+	if actualType == nil {
+		return false
+	}
+	expectedValue := reflect.ValueOf(expected)
+	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+		// Attempt comparison after type conversion
+		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+	}
+
+	return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+	pc := uintptr(0)
+	file := ""
+	line := 0
+	ok := false
+	name := ""
+
+	callers := []string{}
+	for i := 0; ; i++ {
+		pc, file, line, ok = runtime.Caller(i)
+		if !ok {
+			// The breaks below failed to terminate the loop, and we ran off the
+			// end of the call stack.
+			break
+		}
+
+		// This is a rare edge case: without this check, the code below would panic. See issue #180.
+		if file == "<autogenerated>" {
+			break
+		}
+
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			break
+		}
+		name = f.Name()
+
+		// testing.tRunner is the standard library function that calls
+		// tests. Subtests are called directly by tRunner, without going through
+		// the Test/Benchmark/Example function that contains the t.Run calls, so
+		// with subtests we should break when we hit tRunner, without adding it
+		// to the list of callers.
+		if name == "testing.tRunner" {
+			break
+		}
+
+		parts := strings.Split(file, "/")
+		file = parts[len(parts)-1]
+		if len(parts) > 1 {
+			dir := parts[len(parts)-2]
+			if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+				callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+			}
+		}
+
+		// Drop the package
+		segments := strings.Split(name, ".")
+		name = segments[len(segments)-1]
+		if isTest(name, "Test") ||
+			isTest(name, "Benchmark") ||
+			isTest(name, "Example") {
+			break
+		}
+	}
+
+	return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
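+// For example, isTest("TestFoo", "Test") is true, while isTest("Testify", "Test") is false.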
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	r, _ := utf8.DecodeRuneInString(name[len(prefix):])
+	return !unicode.IsLower(r)
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+	if len(msgAndArgs) == 0 {
+		return ""
+	}
+	if len(msgAndArgs) == 1 {
+		msg := msgAndArgs[0]
+		if msgAsStr, ok := msg.(string); ok {
+			return msgAsStr
+		}
+		return fmt.Sprintf("%+v", msg)
+	}
+	if len(msgAndArgs) > 1 {
+		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+	}
+	return ""
+}
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+	outBuf := new(bytes.Buffer)
+
+	for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+		// no need to align first line because it starts at the correct location (after the label)
+		if i != 0 {
+			// append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+			outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+		}
+		outBuf.WriteString(scanner.Text())
+	}
+
+	return outBuf.String()
+}
+
+type failNower interface {
+	FailNow()
+}
+
+// FailNow fails the test and stops its execution immediately.
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	Fail(t, failureMessage, msgAndArgs...)
+
+	// We cannot extend TestingT with FailNow() and
+	// maintain backwards compatibility, so we fallback
+	// to panicking when FailNow is not available in
+	// TestingT.
+	// See issue #263
+
+	if t, ok := t.(failNower); ok {
+		t.FailNow()
+	} else {
+		panic("test failed and t is missing `FailNow()`")
+	}
+	return false
+}
+
+// Fail reports a failure through the supplied TestingT object.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	content := []labeledContent{
+		{"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+		{"Error", failureMessage},
+	}
+
+	// Add test name if the Go version supports it
+	if n, ok := t.(interface {
+		Name() string
+	}); ok {
+		content = append(content, labeledContent{"Test", n.Name()})
+	}
+
+	message := messageFromMsgAndArgs(msgAndArgs...)
+	if len(message) > 0 {
+		content = append(content, labeledContent{"Messages", message})
+	}
+
+	t.Errorf("\n%s", labeledOutput(content...))
+
+	return false
+}
+
+type labeledContent struct {
+	label   string
+	content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+//   \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+	longestLabel := 0
+	for _, v := range content {
+		if len(v.label) > longestLabel {
+			longestLabel = len(v.label)
+		}
+	}
+	var output string
+	for _, v := range content {
+		output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+	}
+	return output
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+//    assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+	if object == nil {
+		return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+	}
+	if !reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+	}
+
+	return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Equal asserts that two objects are equal.
+//
+//    assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if !ObjectsAreEqual(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parenthesis similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+		return fmt.Sprintf("%T(%#v)", expected, expected),
+			fmt.Sprintf("%T(%#v)", actual, actual)
+	}
+
+	return fmt.Sprintf("%#v", expected),
+		fmt.Sprintf("%#v", actual)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+//    assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal: \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+//    assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+	}
+
+	return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+//    assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !isNil(object) {
+		return true
+	}
+	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// containsKind checks whether the specified kind is in the slice of kinds.
+func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
+	for i := 0; i < len(kinds); i++ {
+		if kind == kinds[i] {
+			return true
+		}
+	}
+
+	return false
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+	if object == nil {
+		return true
+	}
+
+	value := reflect.ValueOf(object)
+	kind := value.Kind()
+	isNilableKind := containsKind(
+		[]reflect.Kind{
+			reflect.Chan, reflect.Func,
+			reflect.Interface, reflect.Map,
+			reflect.Ptr, reflect.Slice},
+		kind)
+
+	if isNilableKind && value.IsNil() {
+		return true
+	}
+
+	return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+//    assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isNil(object) {
+		return true
+	}
+	return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
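+// For example, nil, "", 0, false, []int{} and (*int)(nil) are all considered empty.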
+func isEmpty(object interface{}) bool {
+
+	// get nil case out of the way
+	if object == nil {
+		return true
+	}
+
+	objValue := reflect.ValueOf(object)
+
+	switch objValue.Kind() {
+	// collection types are empty when they have no element
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		return objValue.Len() == 0
+	// pointers are empty if nil or if the value they point to is empty
+	case reflect.Ptr:
+		if objValue.IsNil() {
+			return true
+		}
+		deref := objValue.Elem().Interface()
+		return isEmpty(deref)
+	// for all other types, compare against the zero value
+	default:
+		zero := reflect.Zero(objValue.Type())
+		return reflect.DeepEqual(object, zero.Interface())
+	}
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or a
+// slice or a channel with len == 0.
+//
+//  assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	pass := isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0,
+// or a slice or a channel with len == 0.
+//
+//  if assert.NotEmpty(t, obj) {
+//    assert.Equal(t, "two", obj[1])
+//  }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	pass := !isEmpty(object)
+	if !pass {
+		Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+	}
+
+	return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if this is impossible.
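+// For example, getLen([]int{1, 2}) returns (true, 2), while getLen(42) returns (false, 0).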
+func getLen(x interface{}) (ok bool, length int) {
+	v := reflect.ValueOf(x)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+	return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that len() does not accept.
+//
+//    assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	ok, l := getLen(object)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+	}
+
+	if l != length {
+		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+	}
+	return true
+}
+
+// True asserts that the specified value is true.
+//
+//    assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if !value {
+		return Fail(t, "Should be true", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// False asserts that the specified value is false.
+//
+//    assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if value {
+		return Fail(t, "Should be false", msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+//    assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err := validateEqualArgs(expected, actual); err != nil {
+		return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+			expected, actual, err), msgAndArgs...)
+	}
+
+	if ObjectsAreEqual(expected, actual) {
+		return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// includeElement loops over the list to check whether it includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+	listValue := reflect.ValueOf(list)
+	elementValue := reflect.ValueOf(element)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+			found = false
+		}
+	}()
+
+	if reflect.TypeOf(list).Kind() == reflect.String {
+		return true, strings.Contains(listValue.String(), elementValue.String())
+	}
+
+	if reflect.TypeOf(list).Kind() == reflect.Map {
+		mapKeys := listValue.MapKeys()
+		for i := 0; i < len(mapKeys); i++ {
+			if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+				return true, true
+			}
+		}
+		return true, false
+	}
+
+	for i := 0; i < listValue.Len(); i++ {
+		if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+			return true, true
+		}
+	}
+	return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+//    assert.Contains(t, "Hello World", "World")
+//    assert.Contains(t, ["Hello", "World"], "World")
+//    assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if !found {
+		return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+//    assert.NotContains(t, "Hello World", "Earth")
+//    assert.NotContains(t, ["Hello", "World"], "Earth")
+//    assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	ok, found := includeElement(s, contains)
+	if !ok {
+		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+	}
+	if found {
+		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+	}
+
+	return true
+
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if subset == nil {
+		return true // we consider nil to be equal to the nil set
+	}
+
+	subsetValue := reflect.ValueOf(subset)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+
+	listKind := reflect.TypeOf(list).Kind()
+	subsetKind := reflect.TypeOf(subset).Kind()
+
+	if listKind != reflect.Array && listKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+	}
+
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+	}
+
+	for i := 0; i < subsetValue.Len(); i++ {
+		element := subsetValue.Index(i).Interface()
+		ok, found := includeElement(list, element)
+		if !ok {
+			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+		}
+		if !found {
+			return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+		}
+	}
+
+	return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+//    assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if subset == nil {
+		return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+	}
+
+	subsetValue := reflect.ValueOf(subset)
+	defer func() {
+		if e := recover(); e != nil {
+			ok = false
+		}
+	}()
+
+	listKind := reflect.TypeOf(list).Kind()
+	subsetKind := reflect.TypeOf(subset).Kind()
+
+	if listKind != reflect.Array && listKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+	}
+
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+	}
+
+	for i := 0; i < subsetValue.Len(); i++ {
+		element := subsetValue.Index(i).Interface()
+		ok, found := includeElement(list, element)
+		if !ok {
+			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+		}
+		if !found {
+			return true
+		}
+	}
+
+	return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isEmpty(listA) && isEmpty(listB) {
+		return true
+	}
+
+	aKind := reflect.TypeOf(listA).Kind()
+	bKind := reflect.TypeOf(listB).Kind()
+
+	if aKind != reflect.Array && aKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...)
+	}
+
+	if bKind != reflect.Array && bKind != reflect.Slice {
+		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...)
+	}
+
+	aValue := reflect.ValueOf(listA)
+	bValue := reflect.ValueOf(listB)
+
+	aLen := aValue.Len()
+	bLen := bValue.Len()
+
+	if aLen != bLen {
+		return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...)
+	}
+
+	// Mark indexes in bValue that we already used
+	visited := make([]bool, bLen)
+	for i := 0; i < aLen; i++ {
+		element := aValue.Index(i).Interface()
+		found := false
+		for j := 0; j < bLen; j++ {
+			if visited[j] {
+				continue
+			}
+			if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+				visited[j] = true
+				found = true
+				break
+			}
+		}
+		if !found {
+			return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...)
+		}
+	}
+
+	return true
+}
+
+// Condition uses a Comparison to assert a complex condition.
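+//
+// For example (illustrative; queue is a hypothetical value under test):
+//
+//	assert.Condition(t, func() bool { return queue.Len() > 0 }, "queue should not be empty")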
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	result := comp()
+	if !result {
+		Fail(t, "Condition failed!", msgAndArgs...)
+	}
+	return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}) {
+	didPanic := false
+	var message interface{}
+	func() {
+		defer func() {
+			if message = recover(); message != nil {
+				didPanic = true
+			}
+		}()
+		// call the target function
+		f()
+	}()
+
+	return didPanic, message
+}
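+
+// Note: didPanic keys off recover() returning a non-nil value, so a function
+// that calls panic(nil) is reported here as not panicking. That is a
+// limitation of this recover-based detection, not a documented guarantee.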
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+//   assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if funcDidPanic, panicValue := didPanic(f); !funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+//   assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	funcDidPanic, panicValue := didPanic(f)
+	if !funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+	}
+	if panicValue != expected {
+		return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+//   assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if funcDidPanic, panicValue := didPanic(f); funcDidPanic {
+		return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+	}
+
+	return true
+}
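+
+// Note that PanicsWithValue compares the recovered value with ==, so the
+// expected value must be comparable and of the same dynamic type as the panic
+// value. A minimal sketch (illustrative; the values are hypothetical):
+//
+//	assert.PanicsWithValue(t, "boom", func() { panic("boom") })             // passes
+//	assert.PanicsWithValue(t, "boom", func() { panic(errors.New("boom")) }) // fails: string vs error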
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	dt := expected.Sub(actual)
+	if dt < -delta || dt > delta {
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+	}
+
+	return true
+}
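+
+// A minimal usage sketch (illustrative; the timestamps are hypothetical). The
+// check is symmetric: either time may be the earlier one.
+//
+//	start := time.Date(2019, 1, 1, 12, 0, 0, 0, time.UTC)
+//	assert.WithinDuration(t, start, start.Add(5*time.Second), 10*time.Second)  // passes
+//	assert.WithinDuration(t, start, start.Add(15*time.Second), 10*time.Second) // fails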
+
+func toFloat(x interface{}) (float64, bool) {
+	var xf float64
+	xok := true
+
+	switch xn := x.(type) {
+	case uint8:
+		xf = float64(xn)
+	case uint16:
+		xf = float64(xn)
+	case uint32:
+		xf = float64(xn)
+	case uint64:
+		xf = float64(xn)
+	case int:
+		xf = float64(xn)
+	case int8:
+		xf = float64(xn)
+	case int16:
+		xf = float64(xn)
+	case int32:
+		xf = float64(xn)
+	case int64:
+		xf = float64(xn)
+	case float32:
+		xf = float64(xn)
+	case float64:
+		xf = float64(xn)
+	case time.Duration:
+		xf = float64(xn)
+	default:
+		xok = false
+	}
+
+	return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// 	 assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	af, aok := toFloat(expected)
+	bf, bok := toFloat(actual)
+
+	if !aok || !bok {
+		return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+	}
+
+	if math.IsNaN(af) {
+		return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+	}
+
+	if math.IsNaN(bf) {
+		return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+	}
+
+	dt := af - bf
+	if dt < -delta || dt > delta {
+		return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+	}
+
+	return true
+}
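+
+// A minimal usage sketch (illustrative): InDelta passes when
+// |expected - actual| <= delta.
+//
+//	assert.InDelta(t, 100.0, 101.5, 2.0) // passes: difference is 1.5
+//	assert.InDelta(t, 100.0, 103.0, 2.0) // fails: difference is 3.0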
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+	}
+
+	actualSlice := reflect.ValueOf(actual)
+	expectedSlice := reflect.ValueOf(expected)
+
+	for i := 0; i < actualSlice.Len(); i++ {
+		result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
+		if !result {
+			return result
+		}
+	}
+
+	return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Map ||
+		reflect.TypeOf(expected).Kind() != reflect.Map {
+		return Fail(t, "Arguments must be maps", msgAndArgs...)
+	}
+
+	expectedMap := reflect.ValueOf(expected)
+	actualMap := reflect.ValueOf(actual)
+
+	if expectedMap.Len() != actualMap.Len() {
+		return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
+	}
+
+	for _, k := range expectedMap.MapKeys() {
+		ev := expectedMap.MapIndex(k)
+		av := actualMap.MapIndex(k)
+
+		if !ev.IsValid() {
+			return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+		}
+
+		if !av.IsValid() {
+			return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+		}
+
+		if !InDelta(
+			t,
+			ev.Interface(),
+			av.Interface(),
+			delta,
+			msgAndArgs...,
+		) {
+			return false
+		}
+	}
+
+	return true
+}
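+
+// A minimal usage sketch (illustrative; the keys and values are hypothetical):
+//
+//	expected := map[string]float64{"cpu": 0.50, "mem": 0.25}
+//	actual := map[string]float64{"cpu": 0.51, "mem": 0.24}
+//	assert.InDeltaMapValues(t, expected, actual, 0.02) // passes: every value is within 0.02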
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+	af, aok := toFloat(expected)
+	if !aok {
+		return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+	}
+	if af == 0 {
+		return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+	}
+	bf, bok := toFloat(actual)
+	if !bok {
+		return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+	}
+
+	return math.Abs(af-bf) / math.Abs(af), nil
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon.
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	actualEpsilon, err := calcRelativeError(expected, actual)
+	if err != nil {
+		return Fail(t, err.Error(), msgAndArgs...)
+	}
+	if actualEpsilon > epsilon {
+		return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+			"        < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+	}
+
+	return true
+}
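+
+// A minimal usage sketch (illustrative): the relative error is
+// |expected - actual| / |expected|, so epsilon scales with the magnitude of
+// expected.
+//
+//	assert.InEpsilon(t, 1000.0, 1010.0, 0.02) // passes: relative error is 0.01
+//	assert.InEpsilon(t, 10.0, 10.5, 0.02)     // fails: relative error is 0.05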
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if expected == nil || actual == nil ||
+		reflect.TypeOf(actual).Kind() != reflect.Slice ||
+		reflect.TypeOf(expected).Kind() != reflect.Slice {
+		return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+	}
+
+	actualSlice := reflect.ValueOf(actual)
+	expectedSlice := reflect.ValueOf(expected)
+
+	for i := 0; i < actualSlice.Len(); i++ {
+		result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon, msgAndArgs...)
+		if !result {
+			return result
+		}
+	}
+
+	return true
+}
+
+/*
+	Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.NoError(t, err) {
+//	   assert.Equal(t, expectedObj, actualObj)
+//   }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if err != nil {
+		return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+	}
+
+	return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+//   actualObj, err := SomeFunction()
+//   if assert.Error(t, err) {
+//	   assert.Equal(t, expectedError, err)
+//   }
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	if err == nil {
+		return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+	}
+
+	return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+//   actualObj, err := SomeFunction()
+//   assert.EqualError(t, err,  expectedErrorString)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !Error(t, theError, msgAndArgs...) {
+		return false
+	}
+	expected := errString
+	actual := theError.Error()
+	// don't need to use deep equals here, we know they are both strings
+	if expected != actual {
+		return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+			"expected: %q\n"+
+			"actual  : %q", expected, actual), msgAndArgs...)
+	}
+	return true
+}
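+
+// A minimal usage sketch (illustrative; the error is hypothetical). The
+// comparison is an exact string match, not a substring match:
+//
+//	err := errors.New("file not found")
+//	assert.EqualError(t, err, "file not found") // passes
+//	assert.EqualError(t, err, "not found")      // fails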
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+	var r *regexp.Regexp
+	if rr, ok := rx.(*regexp.Regexp); ok {
+		r = rr
+	} else {
+		r = regexp.MustCompile(fmt.Sprint(rx))
+	}
+
+	return r.FindStringIndex(fmt.Sprint(str)) != nil
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//  assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	match := matchRegexp(rx, str)
+
+	if !match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//  assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	match := matchRegexp(rx, str)
+
+	if match {
+		Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+	}
+
+	return !match
+}
+
+// Zero asserts that i is the zero value for its type.
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+		return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+	}
+	return true
+}
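+
+// A minimal usage sketch (illustrative): the zero value is determined per type
+// via reflect.Zero.
+//
+//	assert.Zero(t, 0)          // passes
+//	assert.Zero(t, "")         // passes
+//	assert.Zero(t, []int(nil)) // passes: the zero value of a slice type is nil
+//	assert.NotZero(t, 42)      // passes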
+
+// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+	}
+	return true
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	info, err := os.Lstat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+		}
+		return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+	}
+	if !info.IsDir() {
+		return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+	}
+	return true
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+	if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+	}
+
+	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+	}
+
+	return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+	t := reflect.TypeOf(v)
+	k := t.Kind()
+
+	if k == reflect.Ptr {
+		t = t.Elem()
+		k = t.Kind()
+	}
+	return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+	if expected == nil || actual == nil {
+		return ""
+	}
+
+	et, ek := typeAndKind(expected)
+	at, _ := typeAndKind(actual)
+
+	if et != at {
+		return ""
+	}
+
+	if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+		return ""
+	}
+
+	var e, a string
+	if et != reflect.TypeOf("") {
+		e = spewConfig.Sdump(expected)
+		a = spewConfig.Sdump(actual)
+	} else {
+		e = expected.(string)
+		a = actual.(string)
+	}
+
+	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+		A:        difflib.SplitLines(e),
+		B:        difflib.SplitLines(a),
+		FromFile: "Expected",
+		FromDate: "",
+		ToFile:   "Actual",
+		ToDate:   "",
+		Context:  1,
+	})
+
+	return "\n\nDiff:\n" + diff
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+	if isFunction(expected) || isFunction(actual) {
+		return errors.New("cannot take func type as argument")
+	}
+	return nil
+}
+
+func isFunction(arg interface{}) bool {
+	if arg == nil {
+		return false
+	}
+	return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+	Indent:                  " ",
+	DisablePointerAddresses: true,
+	DisableCapacities:       true,
+	SortKeys:                true,
+}
+
+type tHelper interface {
+	Helper()
+}
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 0000000..c9dccc4
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(t, a, b, "The two words should be the same.")
+//
+//    }
+//
+// If you assert many times, use the format below:
+//
+//    import (
+//      "testing"
+//      "github.com/stretchr/testify/assert"
+//    )
+//
+//    func TestSomething(t *testing.T) {
+//      assert := assert.New(t)
+//
+//      var a string = "Hello"
+//      var b string = "Hello"
+//
+//      assert.Equal(a, b, "The two words should be the same.")
+//    }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failures and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert
diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 0000000..ac9dc9d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+	"errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics and only needs an error to return, for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
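+
+// A minimal usage sketch (illustrative; mockStore is a hypothetical test
+// double wired to fail with AnError):
+//
+//	mockStore.err = assert.AnError
+//	_, err := mockStore.Get("key")
+//	assert.Equal(t, assert.AnError, err)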
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 0000000..9ad5685
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+	t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 0000000..df46fa7
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,143 @@
+package assert
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"strings"
+)
+
+// httpCode is a helper that returns the HTTP status code of the response. It
+// returns -1 and an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url, nil)
+	if err != nil {
+		return -1, err
+	}
+	req.URL.RawQuery = values.Encode()
+	handler(w, req)
+	return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
+	}
+
+	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+	if !isSuccessCode {
+		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isSuccessCode
+}
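+
+// A minimal usage sketch (illustrative; healthHandler is a hypothetical
+// http.HandlerFunc). Note that "success" here means any status from
+// 200 OK through 206 Partial Content, inclusive.
+//
+//	assert.HTTPSuccess(t, healthHandler, "GET", "/health", nil)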
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
+	}
+
+	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+	if !isRedirectCode {
+		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	code, err := httpCode(handler, method, url, values)
+	if err != nil {
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		return false
+	}
+
+	isErrorCode := code >= http.StatusBadRequest
+	if !isErrorCode {
+		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+	}
+
+	return isErrorCode
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if err != nil {
+		return ""
+	}
+	handler(w, req)
+	return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+//  assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if !contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+//  assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return !contains
+}
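+
+// A minimal usage sketch for the body assertions (illustrative; helloHandler
+// is a hypothetical handler that writes "hello, world"):
+//
+//	assert.HTTPBodyContains(t, helloHandler, "GET", "/", nil, "hello")
+//	assert.HTTPBodyNotContains(t, helloHandler, "GET", "/", nil, "goodbye")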